aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorDavid Woodhouse <David.Woodhouse@intel.com>2010-04-09 10:17:41 -0400
committerDavid Woodhouse <David.Woodhouse@intel.com>2010-04-09 10:21:12 -0400
commit87d8a69709d971913e6cc7210450fcb8be963667 (patch)
tree4f8eb95c588f7df84554dcf97d67540664333a7b /net
parent0b8973a81876d90f916507ac40d1381068dc986a (diff)
parent2eaa9cfdf33b8d7fb7aff27792192e0019ae8fc6 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'net')
-rw-r--r--net/8021q/vlan.c6
-rw-r--r--net/8021q/vlan.h2
-rw-r--r--net/8021q/vlan_core.c6
-rw-r--r--net/8021q/vlan_dev.c82
-rw-r--r--net/8021q/vlanproc.c2
-rw-r--r--net/9p/client.c167
-rw-r--r--net/9p/protocol.c74
-rw-r--r--net/9p/protocol.h6
-rw-r--r--net/9p/trans_fd.c122
-rw-r--r--net/9p/trans_rdma.c9
-rw-r--r--net/9p/trans_virtio.c146
-rw-r--r--net/appletalk/aarp.c2
-rw-r--r--net/appletalk/atalk_proc.c30
-rw-r--r--net/atm/addr.c2
-rw-r--r--net/atm/atm_misc.c40
-rw-r--r--net/atm/atm_sysfs.c27
-rw-r--r--net/atm/br2684.c90
-rw-r--r--net/atm/clip.c86
-rw-r--r--net/atm/common.c386
-rw-r--r--net/atm/ioctl.c196
-rw-r--r--net/atm/lec.c599
-rw-r--r--net/atm/mpc.c540
-rw-r--r--net/atm/mpoa_caches.c190
-rw-r--r--net/atm/mpoa_proc.c89
-rw-r--r--net/atm/pppoatm.c28
-rw-r--r--net/atm/proc.c83
-rw-r--r--net/atm/pvc.c43
-rw-r--r--net/atm/raw.c26
-rw-r--r--net/atm/resources.c418
-rw-r--r--net/atm/signaling.c219
-rw-r--r--net/atm/svc.c258
-rw-r--r--net/ax25/af_ax25.c18
-rw-r--r--net/ax25/ax25_out.c6
-rw-r--r--net/ax25/ax25_uid.c25
-rw-r--r--net/bluetooth/bnep/netdev.c6
-rw-r--r--net/bluetooth/cmtp/capi.c37
-rw-r--r--net/bluetooth/hci_conn.c3
-rw-r--r--net/bluetooth/hci_core.c12
-rw-r--r--net/bluetooth/hci_event.c2
-rw-r--r--net/bluetooth/hci_sock.c4
-rw-r--r--net/bluetooth/hci_sysfs.c124
-rw-r--r--net/bluetooth/hidp/core.c132
-rw-r--r--net/bluetooth/hidp/hidp.h4
-rw-r--r--net/bluetooth/l2cap.c65
-rw-r--r--net/bluetooth/rfcomm/core.c47
-rw-r--r--net/bluetooth/rfcomm/sock.c36
-rw-r--r--net/bluetooth/sco.c36
-rw-r--r--net/bridge/Kconfig14
-rw-r--r--net/bridge/Makefile2
-rw-r--r--net/bridge/br_device.c23
-rw-r--r--net/bridge/br_forward.c166
-rw-r--r--net/bridge/br_if.c8
-rw-r--r--net/bridge/br_input.c41
-rw-r--r--net/bridge/br_multicast.c1309
-rw-r--r--net/bridge/br_private.h185
-rw-r--r--net/bridge/br_stp.c2
-rw-r--r--net/bridge/br_stp_if.c1
-rw-r--r--net/bridge/br_sysfs_br.c281
-rw-r--r--net/bridge/br_sysfs_if.c20
-rw-r--r--net/bridge/netfilter/ebt_802_3.c2
-rw-r--r--net/bridge/netfilter/ebt_arp.c2
-rw-r--r--net/bridge/netfilter/ebt_arpreply.c2
-rw-r--r--net/bridge/netfilter/ebt_dnat.c2
-rw-r--r--net/bridge/netfilter/ebt_ip.c2
-rw-r--r--net/bridge/netfilter/ebt_ip6.c2
-rw-r--r--net/bridge/netfilter/ebt_limit.c18
-rw-r--r--net/bridge/netfilter/ebt_log.c2
-rw-r--r--net/bridge/netfilter/ebt_mark.c33
-rw-r--r--net/bridge/netfilter/ebt_mark_m.c39
-rw-r--r--net/bridge/netfilter/ebt_nflog.c2
-rw-r--r--net/bridge/netfilter/ebt_pkttype.c2
-rw-r--r--net/bridge/netfilter/ebt_redirect.c2
-rw-r--r--net/bridge/netfilter/ebt_snat.c2
-rw-r--r--net/bridge/netfilter/ebt_stp.c2
-rw-r--r--net/bridge/netfilter/ebt_ulog.c2
-rw-r--r--net/bridge/netfilter/ebt_vlan.c2
-rw-r--r--net/bridge/netfilter/ebtable_broute.c2
-rw-r--r--net/bridge/netfilter/ebtable_filter.c2
-rw-r--r--net/bridge/netfilter/ebtable_nat.c2
-rw-r--r--net/bridge/netfilter/ebtables.c1247
-rw-r--r--net/can/af_can.c124
-rw-r--r--net/can/af_can.h4
-rw-r--r--net/can/proc.c93
-rw-r--r--net/core/dev.c305
-rw-r--r--net/core/dev_mcast.c5
-rw-r--r--net/core/drop_monitor.c1
-rw-r--r--net/core/dst.c2
-rw-r--r--net/core/ethtool.c434
-rw-r--r--net/core/fib_rules.c2
-rw-r--r--net/core/filter.c8
-rw-r--r--net/core/neighbour.c20
-rw-r--r--net/core/net-sysfs.c3
-rw-r--r--net/core/netpoll.c178
-rw-r--r--net/core/pktgen.c13
-rw-r--r--net/core/rtnetlink.c141
-rw-r--r--net/core/scm.c2
-rw-r--r--net/core/sock.c42
-rw-r--r--net/dcb/dcbnl.c16
-rw-r--r--net/dccp/ccid.c29
-rw-r--r--net/dccp/ccid.h6
-rw-r--r--net/dccp/ipv4.c12
-rw-r--r--net/dccp/ipv6.c12
-rw-r--r--net/dccp/minisocks.c2
-rw-r--r--net/dccp/probe.c23
-rw-r--r--net/dccp/proto.c23
-rw-r--r--net/decnet/dn_route.c14
-rw-r--r--net/ethernet/eth.c6
-rw-r--r--net/ipv4/Kconfig6
-rw-r--r--net/ipv4/af_inet.c46
-rw-r--r--net/ipv4/ah4.c2
-rw-r--r--net/ipv4/arp.c58
-rw-r--r--net/ipv4/devinet.c40
-rw-r--r--net/ipv4/esp4.c2
-rw-r--r--net/ipv4/fib_frontend.c6
-rw-r--r--net/ipv4/fib_semantics.c80
-rw-r--r--net/ipv4/fib_trie.c4
-rw-r--r--net/ipv4/icmp.c2
-rw-r--r--net/ipv4/igmp.c89
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/inet_diag.c2
-rw-r--r--net/ipv4/ip_fragment.c39
-rw-r--r--net/ipv4/ip_gre.c36
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/ip_sockglue.c14
-rw-r--r--net/ipv4/ipcomp.c17
-rw-r--r--net/ipv4/ipconfig.c59
-rw-r--r--net/ipv4/ipip.c20
-rw-r--r--net/ipv4/ipmr.c17
-rw-r--r--net/ipv4/netfilter/arp_tables.c390
-rw-r--r--net/ipv4/netfilter/arptable_filter.c95
-rw-r--r--net/ipv4/netfilter/ip_tables.c561
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c14
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c4
-rw-r--r--net/ipv4/netfilter/iptable_filter.c124
-rw-r--r--net/ipv4/netfilter/iptable_mangle.c166
-rw-r--r--net/ipv4/netfilter/iptable_raw.c96
-rw-r--r--net/ipv4/netfilter/iptable_security.c117
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c5
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c4
-rw-r--r--net/ipv4/netfilter/nf_conntrack_proto_icmp.c11
-rw-r--r--net/ipv4/netfilter/nf_defrag_ipv4.c34
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c40
-rw-r--r--net/ipv4/netfilter/nf_nat_ftp.c105
-rw-r--r--net/ipv4/netfilter/nf_nat_helper.c39
-rw-r--r--net/ipv4/netfilter/nf_nat_pptp.c3
-rw-r--r--net/ipv4/netfilter/nf_nat_rule.c41
-rw-r--r--net/ipv4/netfilter/nf_nat_sip.c154
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c31
-rw-r--r--net/ipv4/proc.c34
-rw-r--r--net/ipv4/route.c129
-rw-r--r--net/ipv4/syncookies.c30
-rw-r--r--net/ipv4/sysctl_net_ipv4.c14
-rw-r--r--net/ipv4/tcp.c130
-rw-r--r--net/ipv4/tcp_input.c45
-rw-r--r--net/ipv4/tcp_ipv4.c60
-rw-r--r--net/ipv4/tcp_minisocks.c12
-rw-r--r--net/ipv4/tcp_output.c58
-rw-r--r--net/ipv4/tcp_probe.c19
-rw-r--r--net/ipv4/tcp_timer.c29
-rw-r--r--net/ipv4/udp.c32
-rw-r--r--net/ipv4/udplite.c4
-rw-r--r--net/ipv4/xfrm4_policy.c19
-rw-r--r--net/ipv6/addrconf.c160
-rw-r--r--net/ipv6/addrconf_core.c2
-rw-r--r--net/ipv6/af_inet6.c32
-rw-r--r--net/ipv6/ah6.c2
-rw-r--r--net/ipv6/anycast.c2
-rw-r--r--net/ipv6/esp6.c2
-rw-r--r--net/ipv6/exthdrs.c9
-rw-r--r--net/ipv6/fib6_rules.c15
-rw-r--r--net/ipv6/icmp.c12
-rw-r--r--net/ipv6/ip6_fib.c52
-rw-r--r--net/ipv6/ip6_flowlabel.c9
-rw-r--r--net/ipv6/ip6_input.c3
-rw-r--r--net/ipv6/ip6_output.c24
-rw-r--r--net/ipv6/ip6_tunnel.c43
-rw-r--r--net/ipv6/ip6mr.c14
-rw-r--r--net/ipv6/ipcomp6.c21
-rw-r--r--net/ipv6/mcast.c32
-rw-r--r--net/ipv6/mip6.c2
-rw-r--r--net/ipv6/ndisc.c7
-rw-r--r--net/ipv6/netfilter/ip6_tables.c563
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c2
-rw-r--r--net/ipv6/netfilter/ip6table_filter.c113
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c141
-rw-r--r--net/ipv6/netfilter/ip6table_raw.c86
-rw-r--r--net/ipv6/netfilter/ip6table_security.c109
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c27
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c11
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c23
-rw-r--r--net/ipv6/proc.c39
-rw-r--r--net/ipv6/raw.c4
-rw-r--r--net/ipv6/reassembly.c35
-rw-r--r--net/ipv6/route.c39
-rw-r--r--net/ipv6/sit.c25
-rw-r--r--net/ipv6/syncookies.c31
-rw-r--r--net/ipv6/sysctl_net_ipv6.c4
-rw-r--r--net/ipv6/tcp_ipv6.c32
-rw-r--r--net/ipv6/tunnel6.c4
-rw-r--r--net/ipv6/udp.c50
-rw-r--r--net/ipv6/udplite.c4
-rw-r--r--net/ipv6/xfrm6_input.c2
-rw-r--r--net/ipv6/xfrm6_output.c2
-rw-r--r--net/ipv6/xfrm6_policy.c28
-rw-r--r--net/ipv6/xfrm6_tunnel.c194
-rw-r--r--net/ipx/ipx_proc.c90
-rw-r--r--net/irda/ircomm/ircomm_tty.c6
-rw-r--r--net/irda/irlan/irlan_common.c28
-rw-r--r--net/irda/irlan/irlan_eth.c5
-rw-r--r--net/irda/irnet/irnet_ppp.c5
-rw-r--r--net/irda/irnetlink.c2
-rw-r--r--net/key/af_key.c183
-rw-r--r--net/llc/af_llc.c64
-rw-r--r--net/llc/llc_c_ac.c2
-rw-r--r--net/llc/llc_conn.c146
-rw-r--r--net/llc/llc_core.c53
-rw-r--r--net/llc/llc_output.c45
-rw-r--r--net/llc/llc_proc.c69
-rw-r--r--net/llc/llc_sap.c111
-rw-r--r--net/mac80211/Kconfig12
-rw-r--r--net/mac80211/Makefile4
-rw-r--r--net/mac80211/agg-rx.c15
-rw-r--r--net/mac80211/agg-tx.c38
-rw-r--r--net/mac80211/cfg.c201
-rw-r--r--net/mac80211/debugfs.c127
-rw-r--r--net/mac80211/debugfs_key.c2
-rw-r--r--net/mac80211/debugfs_netdev.c216
-rw-r--r--net/mac80211/debugfs_netdev.h9
-rw-r--r--net/mac80211/debugfs_sta.c88
-rw-r--r--net/mac80211/driver-ops.h169
-rw-r--r--net/mac80211/driver-trace.h176
-rw-r--r--net/mac80211/ht.c78
-rw-r--r--net/mac80211/ibss.c131
-rw-r--r--net/mac80211/ieee80211_i.h215
-rw-r--r--net/mac80211/iface.c153
-rw-r--r--net/mac80211/key.c10
-rw-r--r--net/mac80211/key.h8
-rw-r--r--net/mac80211/main.c76
-rw-r--r--net/mac80211/mesh.c6
-rw-r--r--net/mac80211/mesh_hwmp.c20
-rw-r--r--net/mac80211/mesh_pathtbl.c6
-rw-r--r--net/mac80211/mesh_plink.c25
-rw-r--r--net/mac80211/mlme.c1347
-rw-r--r--net/mac80211/offchannel.c170
-rw-r--r--net/mac80211/pm.c18
-rw-r--r--net/mac80211/rate.c93
-rw-r--r--net/mac80211/rate.h19
-rw-r--r--net/mac80211/rc80211_pid_algo.c10
-rw-r--r--net/mac80211/rx.c475
-rw-r--r--net/mac80211/scan.c257
-rw-r--r--net/mac80211/spectmgmt.c4
-rw-r--r--net/mac80211/sta_info.c778
-rw-r--r--net/mac80211/sta_info.h68
-rw-r--r--net/mac80211/status.c107
-rw-r--r--net/mac80211/tkip.c47
-rw-r--r--net/mac80211/tx.c377
-rw-r--r--net/mac80211/util.c341
-rw-r--r--net/mac80211/wep.c17
-rw-r--r--net/mac80211/wme.c96
-rw-r--r--net/mac80211/wme.h8
-rw-r--r--net/mac80211/work.c1100
-rw-r--r--net/mac80211/wpa.c57
-rw-r--r--net/netfilter/Kconfig25
-rw-r--r--net/netfilter/Makefile1
-rw-r--r--net/netfilter/ipvs/Kconfig14
-rw-r--r--net/netfilter/ipvs/Makefile1
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c42
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c68
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c31
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c44
-rw-r--r--net/netfilter/ipvs/ip_vs_proto.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_sctp.c1183
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c14
-rw-r--r--net/netfilter/ipvs/ip_vs_wrr.c15
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c10
-rw-r--r--net/netfilter/nf_conntrack_core.c260
-rw-r--r--net/netfilter/nf_conntrack_expect.c35
-rw-r--r--net/netfilter/nf_conntrack_extend.c1
-rw-r--r--net/netfilter/nf_conntrack_ftp.c18
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c3
-rw-r--r--net/netfilter/nf_conntrack_helper.c46
-rw-r--r--net/netfilter/nf_conntrack_netlink.c235
-rw-r--r--net/netfilter/nf_conntrack_pptp.c14
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c5
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c4
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c6
-rw-r--r--net/netfilter/nf_conntrack_proto_udplite.c4
-rw-r--r--net/netfilter/nf_conntrack_sip.c336
-rw-r--r--net/netfilter/nf_conntrack_standalone.c13
-rw-r--r--net/netfilter/nf_queue.c2
-rw-r--r--net/netfilter/nfnetlink.c65
-rw-r--r--net/netfilter/nfnetlink_log.c5
-rw-r--r--net/netfilter/nfnetlink_queue.c7
-rw-r--r--net/netfilter/x_tables.c80
-rw-r--r--net/netfilter/xt_CT.c164
-rw-r--r--net/netfilter/xt_NFQUEUE.c6
-rw-r--r--net/netfilter/xt_RATEEST.c7
-rw-r--r--net/netfilter/xt_TCPMSS.c30
-rw-r--r--net/netfilter/xt_connlimit.c27
-rw-r--r--net/netfilter/xt_hashlimit.c221
-rw-r--r--net/netfilter/xt_limit.c4
-rw-r--r--net/netfilter/xt_osf.c4
-rw-r--r--net/netfilter/xt_recent.c168
-rw-r--r--net/netfilter/xt_repldata.h35
-rw-r--r--net/netlabel/netlabel_domainhash.c1
-rw-r--r--net/netlabel/netlabel_unlabeled.c3
-rw-r--r--net/netlink/af_netlink.c29
-rw-r--r--net/netlink/genetlink.c4
-rw-r--r--net/netrom/af_netrom.c21
-rw-r--r--net/netrom/nr_route.c64
-rw-r--r--net/packet/Kconfig10
-rw-r--r--net/packet/af_packet.c394
-rw-r--r--net/phonet/datagram.c6
-rw-r--r--net/phonet/pep-gprs.c4
-rw-r--r--net/phonet/pep.c29
-rw-r--r--net/phonet/pn_dev.c7
-rw-r--r--net/phonet/pn_netlink.c3
-rw-r--r--net/rds/ib.c4
-rw-r--r--net/rds/iw.c4
-rw-r--r--net/rds/tcp_connect.c7
-rw-r--r--net/rds/tcp_listen.c6
-rw-r--r--net/rds/tcp_send.c4
-rw-r--r--net/rfkill/input.c8
-rw-r--r--net/rose/af_rose.c22
-rw-r--r--net/rose/rose_link.c8
-rw-r--r--net/rose/rose_loopback.c2
-rw-r--r--net/rose/rose_route.c5
-rw-r--r--net/rxrpc/ar-accept.c6
-rw-r--r--net/sched/Kconfig21
-rw-r--r--net/sched/cls_cgroup.c36
-rw-r--r--net/sched/sch_api.c1
-rw-r--r--net/sched/sch_fifo.c34
-rw-r--r--net/sctp/bind_addr.c1
-rw-r--r--net/sctp/input.c42
-rw-r--r--net/sctp/ipv6.c1
-rw-r--r--net/sctp/proc.c4
-rw-r--r--net/sctp/protocol.c6
-rw-r--r--net/sctp/sm_sideeffect.c2
-rw-r--r--net/sctp/socket.c8
-rw-r--r--net/socket.c129
-rw-r--r--net/sunrpc/addr.c8
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c33
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c4
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c2
-rw-r--r--net/sunrpc/bc_svc.c15
-rw-r--r--net/sunrpc/clnt.c1
-rw-r--r--net/sunrpc/rpc_pipe.c13
-rw-r--r--net/sunrpc/sched.c15
-rw-r--r--net/sunrpc/svc.c6
-rw-r--r--net/sunrpc/svc_xprt.c57
-rw-r--r--net/sunrpc/svcauth_unix.c102
-rw-r--r--net/sunrpc/svcsock.c3
-rw-r--r--net/sunrpc/xprt.c22
-rw-r--r--net/sunrpc/xprtrdma/transport.c7
-rw-r--r--net/sunrpc/xprtsock.c29
-rw-r--r--net/sysctl_net.c4
-rw-r--r--net/tipc/Kconfig75
-rw-r--r--net/tipc/bearer.c37
-rw-r--r--net/tipc/bearer.h2
-rw-r--r--net/tipc/core.c10
-rw-r--r--net/tipc/link.c9
-rw-r--r--net/tipc/net.c25
-rw-r--r--net/tipc/ref.c26
-rw-r--r--net/tipc/socket.c6
-rw-r--r--net/tipc/subscr.c57
-rw-r--r--net/tipc/subscr.h2
-rw-r--r--net/unix/af_unix.c6
-rw-r--r--net/unix/sysctl_net_unix.c2
-rw-r--r--net/wimax/op-msg.c3
-rw-r--r--net/wimax/op-reset.c3
-rw-r--r--net/wimax/op-rfkill.c3
-rw-r--r--net/wimax/op-state-get.c3
-rw-r--r--net/wimax/stack.c3
-rw-r--r--net/wireless/.gitignore1
-rw-r--r--net/wireless/Kconfig13
-rw-r--r--net/wireless/Makefile6
-rw-r--r--net/wireless/chan.c41
-rw-r--r--net/wireless/core.c63
-rw-r--r--net/wireless/core.h20
-rw-r--r--net/wireless/db.txt17
-rw-r--r--net/wireless/genregdb.awk118
-rw-r--r--net/wireless/lib80211_crypt_ccmp.c2
-rw-r--r--net/wireless/lib80211_crypt_tkip.c23
-rw-r--r--net/wireless/mlme.c227
-rw-r--r--net/wireless/nl80211.c866
-rw-r--r--net/wireless/nl80211.h23
-rw-r--r--net/wireless/radiotap.c305
-rw-r--r--net/wireless/reg.c689
-rw-r--r--net/wireless/reg.h29
-rw-r--r--net/wireless/regdb.h7
-rw-r--r--net/wireless/scan.c171
-rw-r--r--net/wireless/sme.c42
-rw-r--r--net/wireless/sysfs.c20
-rw-r--r--net/wireless/util.c137
-rw-r--r--net/wireless/wext-compat.c49
-rw-r--r--net/wireless/wext-proc.c4
-rw-r--r--net/x25/af_x25.c24
-rw-r--r--net/x25/x25_dev.c2
-rw-r--r--net/x25/x25_proc.c114
-rw-r--r--net/xfrm/xfrm_algo.c16
-rw-r--r--net/xfrm/xfrm_input.c2
-rw-r--r--net/xfrm/xfrm_ipcomp.c16
-rw-r--r--net/xfrm/xfrm_policy.c127
-rw-r--r--net/xfrm/xfrm_proc.c6
-rw-r--r--net/xfrm/xfrm_state.c106
-rw-r--r--net/xfrm/xfrm_sysctl.c4
-rw-r--r--net/xfrm/xfrm_user.c125
410 files changed, 20581 insertions, 10285 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 33f90e7362cc..db783d7af5a3 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -378,6 +378,8 @@ static void vlan_transfer_features(struct net_device *dev,
378#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 378#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
379 vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid; 379 vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
380#endif 380#endif
381 vlandev->real_num_tx_queues = dev->real_num_tx_queues;
382 BUG_ON(vlandev->real_num_tx_queues > vlandev->num_tx_queues);
381 383
382 if (old_features != vlandev->features) 384 if (old_features != vlandev->features)
383 netdev_features_change(vlandev); 385 netdev_features_change(vlandev);
@@ -663,7 +665,7 @@ out:
663 return err; 665 return err;
664} 666}
665 667
666static int vlan_init_net(struct net *net) 668static int __net_init vlan_init_net(struct net *net)
667{ 669{
668 struct vlan_net *vn = net_generic(net, vlan_net_id); 670 struct vlan_net *vn = net_generic(net, vlan_net_id);
669 int err; 671 int err;
@@ -675,7 +677,7 @@ static int vlan_init_net(struct net *net)
675 return err; 677 return err;
676} 678}
677 679
678static void vlan_exit_net(struct net *net) 680static void __net_exit vlan_exit_net(struct net *net)
679{ 681{
680 vlan_proc_cleanup(net); 682 vlan_proc_cleanup(net);
681} 683}
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 5685296017e9..6abdcac1b2e8 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -61,7 +61,7 @@ struct vlan_dev_info {
61 struct proc_dir_entry *dent; 61 struct proc_dir_entry *dent;
62 unsigned long cnt_inc_headroom_on_tx; 62 unsigned long cnt_inc_headroom_on_tx;
63 unsigned long cnt_encap_on_xmit; 63 unsigned long cnt_encap_on_xmit;
64 struct vlan_rx_stats *vlan_rx_stats; 64 struct vlan_rx_stats __percpu *vlan_rx_stats;
65}; 65};
66 66
67static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev) 67static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index e75a2f3b10af..c584a0af77d3 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -11,9 +11,10 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
11 if (netpoll_rx(skb)) 11 if (netpoll_rx(skb))
12 return NET_RX_DROP; 12 return NET_RX_DROP;
13 13
14 if (skb_bond_should_drop(skb)) 14 if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
15 goto drop; 15 goto drop;
16 16
17 skb->skb_iif = skb->dev->ifindex;
17 __vlan_hwaccel_put_tag(skb, vlan_tci); 18 __vlan_hwaccel_put_tag(skb, vlan_tci);
18 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); 19 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
19 20
@@ -82,9 +83,10 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
82{ 83{
83 struct sk_buff *p; 84 struct sk_buff *p;
84 85
85 if (skb_bond_should_drop(skb)) 86 if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
86 goto drop; 87 goto drop;
87 88
89 skb->skb_iif = skb->dev->ifindex;
88 __vlan_hwaccel_put_tag(skb, vlan_tci); 90 __vlan_hwaccel_put_tag(skb, vlan_tci);
89 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); 91 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
90 92
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index b7889782047e..2fd057c81bbf 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -163,7 +163,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
163 goto err_unlock; 163 goto err_unlock;
164 } 164 }
165 165
166 rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, 166 rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats,
167 smp_processor_id()); 167 smp_processor_id());
168 rx_stats->rx_packets++; 168 rx_stats->rx_packets++;
169 rx_stats->rx_bytes += skb->len; 169 rx_stats->rx_bytes += skb->len;
@@ -263,11 +263,10 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
263 vhdr->h_vlan_TCI = htons(vlan_tci); 263 vhdr->h_vlan_TCI = htons(vlan_tci);
264 264
265 /* 265 /*
266 * Set the protocol type. For a packet of type ETH_P_802_3 we 266 * Set the protocol type. For a packet of type ETH_P_802_3/2 we
267 * put the length in here instead. It is up to the 802.2 267 * put the length in here instead.
268 * layer to carry protocol information.
269 */ 268 */
270 if (type != ETH_P_802_3) 269 if (type != ETH_P_802_3 && type != ETH_P_802_2)
271 vhdr->h_vlan_encapsulated_proto = htons(type); 270 vhdr->h_vlan_encapsulated_proto = htons(type);
272 else 271 else
273 vhdr->h_vlan_encapsulated_proto = htons(len); 272 vhdr->h_vlan_encapsulated_proto = htons(len);
@@ -323,7 +322,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
323 } 322 }
324 323
325 324
326 skb->dev = vlan_dev_info(dev)->real_dev; 325 skb_set_dev(skb, vlan_dev_info(dev)->real_dev);
327 len = skb->len; 326 len = skb->len;
328 ret = dev_queue_xmit(skb); 327 ret = dev_queue_xmit(skb);
329 328
@@ -362,6 +361,14 @@ static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
362 return ret; 361 return ret;
363} 362}
364 363
364static u16 vlan_dev_select_queue(struct net_device *dev, struct sk_buff *skb)
365{
366 struct net_device *rdev = vlan_dev_info(dev)->real_dev;
367 const struct net_device_ops *ops = rdev->netdev_ops;
368
369 return ops->ndo_select_queue(rdev, skb);
370}
371
365static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) 372static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
366{ 373{
367 /* TODO: gotta make sure the underlying layer can handle it, 374 /* TODO: gotta make sure the underlying layer can handle it,
@@ -689,7 +696,8 @@ static const struct header_ops vlan_header_ops = {
689 .parse = eth_header_parse, 696 .parse = eth_header_parse,
690}; 697};
691 698
692static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops; 699static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops,
700 vlan_netdev_ops_sq, vlan_netdev_accel_ops_sq;
693 701
694static int vlan_dev_init(struct net_device *dev) 702static int vlan_dev_init(struct net_device *dev)
695{ 703{
@@ -723,11 +731,17 @@ static int vlan_dev_init(struct net_device *dev)
723 if (real_dev->features & NETIF_F_HW_VLAN_TX) { 731 if (real_dev->features & NETIF_F_HW_VLAN_TX) {
724 dev->header_ops = real_dev->header_ops; 732 dev->header_ops = real_dev->header_ops;
725 dev->hard_header_len = real_dev->hard_header_len; 733 dev->hard_header_len = real_dev->hard_header_len;
726 dev->netdev_ops = &vlan_netdev_accel_ops; 734 if (real_dev->netdev_ops->ndo_select_queue)
735 dev->netdev_ops = &vlan_netdev_accel_ops_sq;
736 else
737 dev->netdev_ops = &vlan_netdev_accel_ops;
727 } else { 738 } else {
728 dev->header_ops = &vlan_header_ops; 739 dev->header_ops = &vlan_header_ops;
729 dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN; 740 dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
730 dev->netdev_ops = &vlan_netdev_ops; 741 if (real_dev->netdev_ops->ndo_select_queue)
742 dev->netdev_ops = &vlan_netdev_ops_sq;
743 else
744 dev->netdev_ops = &vlan_netdev_ops;
731 } 745 }
732 746
733 if (is_vlan_dev(real_dev)) 747 if (is_vlan_dev(real_dev))
@@ -866,6 +880,56 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
866#endif 880#endif
867}; 881};
868 882
883static const struct net_device_ops vlan_netdev_ops_sq = {
884 .ndo_select_queue = vlan_dev_select_queue,
885 .ndo_change_mtu = vlan_dev_change_mtu,
886 .ndo_init = vlan_dev_init,
887 .ndo_uninit = vlan_dev_uninit,
888 .ndo_open = vlan_dev_open,
889 .ndo_stop = vlan_dev_stop,
890 .ndo_start_xmit = vlan_dev_hard_start_xmit,
891 .ndo_validate_addr = eth_validate_addr,
892 .ndo_set_mac_address = vlan_dev_set_mac_address,
893 .ndo_set_rx_mode = vlan_dev_set_rx_mode,
894 .ndo_set_multicast_list = vlan_dev_set_rx_mode,
895 .ndo_change_rx_flags = vlan_dev_change_rx_flags,
896 .ndo_do_ioctl = vlan_dev_ioctl,
897 .ndo_neigh_setup = vlan_dev_neigh_setup,
898 .ndo_get_stats = vlan_dev_get_stats,
899#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
900 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
901 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
902 .ndo_fcoe_enable = vlan_dev_fcoe_enable,
903 .ndo_fcoe_disable = vlan_dev_fcoe_disable,
904 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
905#endif
906};
907
908static const struct net_device_ops vlan_netdev_accel_ops_sq = {
909 .ndo_select_queue = vlan_dev_select_queue,
910 .ndo_change_mtu = vlan_dev_change_mtu,
911 .ndo_init = vlan_dev_init,
912 .ndo_uninit = vlan_dev_uninit,
913 .ndo_open = vlan_dev_open,
914 .ndo_stop = vlan_dev_stop,
915 .ndo_start_xmit = vlan_dev_hwaccel_hard_start_xmit,
916 .ndo_validate_addr = eth_validate_addr,
917 .ndo_set_mac_address = vlan_dev_set_mac_address,
918 .ndo_set_rx_mode = vlan_dev_set_rx_mode,
919 .ndo_set_multicast_list = vlan_dev_set_rx_mode,
920 .ndo_change_rx_flags = vlan_dev_change_rx_flags,
921 .ndo_do_ioctl = vlan_dev_ioctl,
922 .ndo_neigh_setup = vlan_dev_neigh_setup,
923 .ndo_get_stats = vlan_dev_get_stats,
924#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
925 .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
926 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
927 .ndo_fcoe_enable = vlan_dev_fcoe_enable,
928 .ndo_fcoe_disable = vlan_dev_fcoe_disable,
929 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
930#endif
931};
932
869void vlan_setup(struct net_device *dev) 933void vlan_setup(struct net_device *dev)
870{ 934{
871 ether_setup(dev); 935 ether_setup(dev);
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 9ec1f057c03a..afead353e215 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -140,7 +140,7 @@ void vlan_proc_cleanup(struct net *net)
140 * Create /proc/net/vlan entries 140 * Create /proc/net/vlan entries
141 */ 141 */
142 142
143int vlan_proc_init(struct net *net) 143int __net_init vlan_proc_init(struct net *net)
144{ 144{
145 struct vlan_net *vn = net_generic(net, vlan_net_id); 145 struct vlan_net *vn = net_generic(net, vlan_net_id);
146 146
diff --git a/net/9p/client.c b/net/9p/client.c
index 8af95b2dddd6..e3e5bf4469ce 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -46,6 +46,7 @@ enum {
46 Opt_msize, 46 Opt_msize,
47 Opt_trans, 47 Opt_trans,
48 Opt_legacy, 48 Opt_legacy,
49 Opt_version,
49 Opt_err, 50 Opt_err,
50}; 51};
51 52
@@ -53,9 +54,42 @@ static const match_table_t tokens = {
53 {Opt_msize, "msize=%u"}, 54 {Opt_msize, "msize=%u"},
54 {Opt_legacy, "noextend"}, 55 {Opt_legacy, "noextend"},
55 {Opt_trans, "trans=%s"}, 56 {Opt_trans, "trans=%s"},
57 {Opt_version, "version=%s"},
56 {Opt_err, NULL}, 58 {Opt_err, NULL},
57}; 59};
58 60
61inline int p9_is_proto_dotl(struct p9_client *clnt)
62{
63 return (clnt->proto_version == p9_proto_2000L);
64}
65EXPORT_SYMBOL(p9_is_proto_dotl);
66
67inline int p9_is_proto_dotu(struct p9_client *clnt)
68{
69 return (clnt->proto_version == p9_proto_2000u);
70}
71EXPORT_SYMBOL(p9_is_proto_dotu);
72
73/* Interpret mount option for protocol version */
74static unsigned char get_protocol_version(const substring_t *name)
75{
76 unsigned char version = -EINVAL;
77 if (!strncmp("9p2000", name->from, name->to-name->from)) {
78 version = p9_proto_legacy;
79 P9_DPRINTK(P9_DEBUG_9P, "Protocol version: Legacy\n");
80 } else if (!strncmp("9p2000.u", name->from, name->to-name->from)) {
81 version = p9_proto_2000u;
82 P9_DPRINTK(P9_DEBUG_9P, "Protocol version: 9P2000.u\n");
83 } else if (!strncmp("9p2000.L", name->from, name->to-name->from)) {
84 version = p9_proto_2000L;
85 P9_DPRINTK(P9_DEBUG_9P, "Protocol version: 9P2000.L\n");
86 } else {
87 P9_DPRINTK(P9_DEBUG_ERROR, "Unknown protocol version %s. ",
88 name->from);
89 }
90 return version;
91}
92
59static struct p9_req_t * 93static struct p9_req_t *
60p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...); 94p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
61 95
@@ -69,24 +103,25 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
69 103
70static int parse_opts(char *opts, struct p9_client *clnt) 104static int parse_opts(char *opts, struct p9_client *clnt)
71{ 105{
72 char *options; 106 char *options, *tmp_options;
73 char *p; 107 char *p;
74 substring_t args[MAX_OPT_ARGS]; 108 substring_t args[MAX_OPT_ARGS];
75 int option; 109 int option;
76 int ret = 0; 110 int ret = 0;
77 111
78 clnt->dotu = 1; 112 clnt->proto_version = p9_proto_2000u;
79 clnt->msize = 8192; 113 clnt->msize = 8192;
80 114
81 if (!opts) 115 if (!opts)
82 return 0; 116 return 0;
83 117
84 options = kstrdup(opts, GFP_KERNEL); 118 tmp_options = kstrdup(opts, GFP_KERNEL);
85 if (!options) { 119 if (!tmp_options) {
86 P9_DPRINTK(P9_DEBUG_ERROR, 120 P9_DPRINTK(P9_DEBUG_ERROR,
87 "failed to allocate copy of option string\n"); 121 "failed to allocate copy of option string\n");
88 return -ENOMEM; 122 return -ENOMEM;
89 } 123 }
124 options = tmp_options;
90 125
91 while ((p = strsep(&options, ",")) != NULL) { 126 while ((p = strsep(&options, ",")) != NULL) {
92 int token; 127 int token;
@@ -108,16 +143,30 @@ static int parse_opts(char *opts, struct p9_client *clnt)
108 break; 143 break;
109 case Opt_trans: 144 case Opt_trans:
110 clnt->trans_mod = v9fs_get_trans_by_name(&args[0]); 145 clnt->trans_mod = v9fs_get_trans_by_name(&args[0]);
146 if(clnt->trans_mod == NULL) {
147 P9_DPRINTK(P9_DEBUG_ERROR,
148 "Could not find request transport: %s\n",
149 (char *) &args[0]);
150 ret = -EINVAL;
151 goto free_and_return;
152 }
111 break; 153 break;
112 case Opt_legacy: 154 case Opt_legacy:
113 clnt->dotu = 0; 155 clnt->proto_version = p9_proto_legacy;
156 break;
157 case Opt_version:
158 ret = get_protocol_version(&args[0]);
159 if (ret == -EINVAL)
160 goto free_and_return;
161 clnt->proto_version = ret;
114 break; 162 break;
115 default: 163 default:
116 continue; 164 continue;
117 } 165 }
118 } 166 }
119 167
120 kfree(options); 168free_and_return:
169 kfree(tmp_options);
121 return ret; 170 return ret;
122} 171}
123 172
@@ -401,14 +450,15 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
401 int ecode; 450 int ecode;
402 char *ename; 451 char *ename;
403 452
404 err = p9pdu_readf(req->rc, c->dotu, "s?d", &ename, &ecode); 453 err = p9pdu_readf(req->rc, c->proto_version, "s?d",
454 &ename, &ecode);
405 if (err) { 455 if (err) {
406 P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error%d\n", 456 P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error%d\n",
407 err); 457 err);
408 return err; 458 return err;
409 } 459 }
410 460
411 if (c->dotu) 461 if (p9_is_proto_dotu(c))
412 err = -ecode; 462 err = -ecode;
413 463
414 if (!err || !IS_ERR_VALUE(err)) 464 if (!err || !IS_ERR_VALUE(err))
@@ -506,7 +556,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
506 /* marshall the data */ 556 /* marshall the data */
507 p9pdu_prepare(req->tc, tag, type); 557 p9pdu_prepare(req->tc, tag, type);
508 va_start(ap, fmt); 558 va_start(ap, fmt);
509 err = p9pdu_vwritef(req->tc, c->dotu, fmt, ap); 559 err = p9pdu_vwritef(req->tc, c->proto_version, fmt, ap);
510 va_end(ap); 560 va_end(ap);
511 p9pdu_finalize(req->tc); 561 p9pdu_finalize(req->tc);
512 562
@@ -618,14 +668,31 @@ int p9_client_version(struct p9_client *c)
618 char *version; 668 char *version;
619 int msize; 669 int msize;
620 670
621 P9_DPRINTK(P9_DEBUG_9P, ">>> TVERSION msize %d extended %d\n", 671 P9_DPRINTK(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n",
622 c->msize, c->dotu); 672 c->msize, c->proto_version);
623 req = p9_client_rpc(c, P9_TVERSION, "ds", c->msize, 673
624 c->dotu ? "9P2000.u" : "9P2000"); 674 switch (c->proto_version) {
675 case p9_proto_2000L:
676 req = p9_client_rpc(c, P9_TVERSION, "ds",
677 c->msize, "9P2000.L");
678 break;
679 case p9_proto_2000u:
680 req = p9_client_rpc(c, P9_TVERSION, "ds",
681 c->msize, "9P2000.u");
682 break;
683 case p9_proto_legacy:
684 req = p9_client_rpc(c, P9_TVERSION, "ds",
685 c->msize, "9P2000");
686 break;
687 default:
688 return -EINVAL;
689 break;
690 }
691
625 if (IS_ERR(req)) 692 if (IS_ERR(req))
626 return PTR_ERR(req); 693 return PTR_ERR(req);
627 694
628 err = p9pdu_readf(req->rc, c->dotu, "ds", &msize, &version); 695 err = p9pdu_readf(req->rc, c->proto_version, "ds", &msize, &version);
629 if (err) { 696 if (err) {
630 P9_DPRINTK(P9_DEBUG_9P, "version error %d\n", err); 697 P9_DPRINTK(P9_DEBUG_9P, "version error %d\n", err);
631 p9pdu_dump(1, req->rc); 698 p9pdu_dump(1, req->rc);
@@ -633,10 +700,12 @@ int p9_client_version(struct p9_client *c)
633 } 700 }
634 701
635 P9_DPRINTK(P9_DEBUG_9P, "<<< RVERSION msize %d %s\n", msize, version); 702 P9_DPRINTK(P9_DEBUG_9P, "<<< RVERSION msize %d %s\n", msize, version);
636 if (!memcmp(version, "9P2000.u", 8)) 703 if (!strncmp(version, "9P2000.L", 8))
637 c->dotu = 1; 704 c->proto_version = p9_proto_2000L;
638 else if (!memcmp(version, "9P2000", 6)) 705 else if (!strncmp(version, "9P2000.u", 8))
639 c->dotu = 0; 706 c->proto_version = p9_proto_2000u;
707 else if (!strncmp(version, "9P2000", 6))
708 c->proto_version = p9_proto_legacy;
640 else { 709 else {
641 err = -EREMOTEIO; 710 err = -EREMOTEIO;
642 goto error; 711 goto error;
@@ -667,18 +736,12 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
667 clnt->trans = NULL; 736 clnt->trans = NULL;
668 spin_lock_init(&clnt->lock); 737 spin_lock_init(&clnt->lock);
669 INIT_LIST_HEAD(&clnt->fidlist); 738 INIT_LIST_HEAD(&clnt->fidlist);
670 clnt->fidpool = p9_idpool_create();
671 if (IS_ERR(clnt->fidpool)) {
672 err = PTR_ERR(clnt->fidpool);
673 clnt->fidpool = NULL;
674 goto error;
675 }
676 739
677 p9_tag_init(clnt); 740 p9_tag_init(clnt);
678 741
679 err = parse_opts(options, clnt); 742 err = parse_opts(options, clnt);
680 if (err < 0) 743 if (err < 0)
681 goto error; 744 goto free_client;
682 745
683 if (!clnt->trans_mod) 746 if (!clnt->trans_mod)
684 clnt->trans_mod = v9fs_get_default_trans(); 747 clnt->trans_mod = v9fs_get_default_trans();
@@ -687,27 +750,40 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
687 err = -EPROTONOSUPPORT; 750 err = -EPROTONOSUPPORT;
688 P9_DPRINTK(P9_DEBUG_ERROR, 751 P9_DPRINTK(P9_DEBUG_ERROR,
689 "No transport defined or default transport\n"); 752 "No transport defined or default transport\n");
690 goto error; 753 goto free_client;
691 } 754 }
692 755
693 P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d dotu %d\n", 756 clnt->fidpool = p9_idpool_create();
694 clnt, clnt->trans_mod, clnt->msize, clnt->dotu); 757 if (IS_ERR(clnt->fidpool)) {
758 err = PTR_ERR(clnt->fidpool);
759 clnt->fidpool = NULL;
760 goto put_trans;
761 }
762
763 P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d protocol %d\n",
764 clnt, clnt->trans_mod, clnt->msize, clnt->proto_version);
695 765
696 err = clnt->trans_mod->create(clnt, dev_name, options); 766 err = clnt->trans_mod->create(clnt, dev_name, options);
697 if (err) 767 if (err)
698 goto error; 768 goto destroy_fidpool;
699 769
700 if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize) 770 if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize)
701 clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ; 771 clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ;
702 772
703 err = p9_client_version(clnt); 773 err = p9_client_version(clnt);
704 if (err) 774 if (err)
705 goto error; 775 goto close_trans;
706 776
707 return clnt; 777 return clnt;
708 778
709error: 779close_trans:
710 p9_client_destroy(clnt); 780 clnt->trans_mod->close(clnt);
781destroy_fidpool:
782 p9_idpool_destroy(clnt->fidpool);
783put_trans:
784 v9fs_put_trans(clnt->trans_mod);
785free_client:
786 kfree(clnt);
711 return ERR_PTR(err); 787 return ERR_PTR(err);
712} 788}
713EXPORT_SYMBOL(p9_client_create); 789EXPORT_SYMBOL(p9_client_create);
@@ -768,7 +844,7 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
768 goto error; 844 goto error;
769 } 845 }
770 846
771 err = p9pdu_readf(req->rc, clnt->dotu, "Q", &qid); 847 err = p9pdu_readf(req->rc, clnt->proto_version, "Q", &qid);
772 if (err) { 848 if (err) {
773 p9pdu_dump(1, req->rc); 849 p9pdu_dump(1, req->rc);
774 p9_free_req(clnt, req); 850 p9_free_req(clnt, req);
@@ -817,7 +893,7 @@ p9_client_auth(struct p9_client *clnt, char *uname, u32 n_uname, char *aname)
817 goto error; 893 goto error;
818 } 894 }
819 895
820 err = p9pdu_readf(req->rc, clnt->dotu, "Q", &qid); 896 err = p9pdu_readf(req->rc, clnt->proto_version, "Q", &qid);
821 if (err) { 897 if (err) {
822 p9pdu_dump(1, req->rc); 898 p9pdu_dump(1, req->rc);
823 p9_free_req(clnt, req); 899 p9_free_req(clnt, req);
@@ -875,7 +951,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
875 goto error; 951 goto error;
876 } 952 }
877 953
878 err = p9pdu_readf(req->rc, clnt->dotu, "R", &nwqids, &wqids); 954 err = p9pdu_readf(req->rc, clnt->proto_version, "R", &nwqids, &wqids);
879 if (err) { 955 if (err) {
880 p9pdu_dump(1, req->rc); 956 p9pdu_dump(1, req->rc);
881 p9_free_req(clnt, req); 957 p9_free_req(clnt, req);
@@ -936,7 +1012,7 @@ int p9_client_open(struct p9_fid *fid, int mode)
936 goto error; 1012 goto error;
937 } 1013 }
938 1014
939 err = p9pdu_readf(req->rc, clnt->dotu, "Qd", &qid, &iounit); 1015 err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit);
940 if (err) { 1016 if (err) {
941 p9pdu_dump(1, req->rc); 1017 p9pdu_dump(1, req->rc);
942 goto free_and_error; 1018 goto free_and_error;
@@ -981,7 +1057,7 @@ int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
981 goto error; 1057 goto error;
982 } 1058 }
983 1059
984 err = p9pdu_readf(req->rc, clnt->dotu, "Qd", &qid, &iounit); 1060 err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit);
985 if (err) { 1061 if (err) {
986 p9pdu_dump(1, req->rc); 1062 p9pdu_dump(1, req->rc);
987 goto free_and_error; 1063 goto free_and_error;
@@ -1082,7 +1158,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
1082 goto error; 1158 goto error;
1083 } 1159 }
1084 1160
1085 err = p9pdu_readf(req->rc, clnt->dotu, "D", &count, &dataptr); 1161 err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr);
1086 if (err) { 1162 if (err) {
1087 p9pdu_dump(1, req->rc); 1163 p9pdu_dump(1, req->rc);
1088 goto free_and_error; 1164 goto free_and_error;
@@ -1143,7 +1219,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
1143 goto error; 1219 goto error;
1144 } 1220 }
1145 1221
1146 err = p9pdu_readf(req->rc, clnt->dotu, "d", &count); 1222 err = p9pdu_readf(req->rc, clnt->proto_version, "d", &count);
1147 if (err) { 1223 if (err) {
1148 p9pdu_dump(1, req->rc); 1224 p9pdu_dump(1, req->rc);
1149 goto free_and_error; 1225 goto free_and_error;
@@ -1183,7 +1259,7 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
1183 goto error; 1259 goto error;
1184 } 1260 }
1185 1261
1186 err = p9pdu_readf(req->rc, clnt->dotu, "wS", &ignored, ret); 1262 err = p9pdu_readf(req->rc, clnt->proto_version, "wS", &ignored, ret);
1187 if (err) { 1263 if (err) {
1188 p9pdu_dump(1, req->rc); 1264 p9pdu_dump(1, req->rc);
1189 p9_free_req(clnt, req); 1265 p9_free_req(clnt, req);
@@ -1210,14 +1286,15 @@ error:
1210} 1286}
1211EXPORT_SYMBOL(p9_client_stat); 1287EXPORT_SYMBOL(p9_client_stat);
1212 1288
1213static int p9_client_statsize(struct p9_wstat *wst, int optional) 1289static int p9_client_statsize(struct p9_wstat *wst, int proto_version)
1214{ 1290{
1215 int ret; 1291 int ret;
1216 1292
1293 /* NOTE: size shouldn't include its own length */
1217 /* size[2] type[2] dev[4] qid[13] */ 1294 /* size[2] type[2] dev[4] qid[13] */
1218 /* mode[4] atime[4] mtime[4] length[8]*/ 1295 /* mode[4] atime[4] mtime[4] length[8]*/
1219 /* name[s] uid[s] gid[s] muid[s] */ 1296 /* name[s] uid[s] gid[s] muid[s] */
1220 ret = 2+2+4+13+4+4+4+8+2+2+2+2; 1297 ret = 2+4+13+4+4+4+8+2+2+2+2;
1221 1298
1222 if (wst->name) 1299 if (wst->name)
1223 ret += strlen(wst->name); 1300 ret += strlen(wst->name);
@@ -1228,7 +1305,7 @@ static int p9_client_statsize(struct p9_wstat *wst, int optional)
1228 if (wst->muid) 1305 if (wst->muid)
1229 ret += strlen(wst->muid); 1306 ret += strlen(wst->muid);
1230 1307
1231 if (optional) { 1308 if (proto_version == p9_proto_2000u) {
1232 ret += 2+4+4+4; /* extension[s] n_uid[4] n_gid[4] n_muid[4] */ 1309 ret += 2+4+4+4; /* extension[s] n_uid[4] n_gid[4] n_muid[4] */
1233 if (wst->extension) 1310 if (wst->extension)
1234 ret += strlen(wst->extension); 1311 ret += strlen(wst->extension);
@@ -1245,7 +1322,7 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
1245 1322
1246 err = 0; 1323 err = 0;
1247 clnt = fid->clnt; 1324 clnt = fid->clnt;
1248 wst->size = p9_client_statsize(wst, clnt->dotu); 1325 wst->size = p9_client_statsize(wst, clnt->proto_version);
1249 P9_DPRINTK(P9_DEBUG_9P, ">>> TWSTAT fid %d\n", fid->fid); 1326 P9_DPRINTK(P9_DEBUG_9P, ">>> TWSTAT fid %d\n", fid->fid);
1250 P9_DPRINTK(P9_DEBUG_9P, 1327 P9_DPRINTK(P9_DEBUG_9P,
1251 " sz=%x type=%x dev=%x qid=%x.%llx.%x\n" 1328 " sz=%x type=%x dev=%x qid=%x.%llx.%x\n"
@@ -1258,7 +1335,7 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
1258 wst->name, wst->uid, wst->gid, wst->muid, wst->extension, 1335 wst->name, wst->uid, wst->gid, wst->muid, wst->extension,
1259 wst->n_uid, wst->n_gid, wst->n_muid); 1336 wst->n_uid, wst->n_gid, wst->n_muid);
1260 1337
1261 req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size, wst); 1338 req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size+2, wst);
1262 if (IS_ERR(req)) { 1339 if (IS_ERR(req)) {
1263 err = PTR_ERR(req); 1340 err = PTR_ERR(req);
1264 goto error; 1341 goto error;
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index fc70147c771e..94f5a8f65e9c 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -52,7 +52,7 @@
52#endif 52#endif
53 53
54static int 54static int
55p9pdu_writef(struct p9_fcall *pdu, int optional, const char *fmt, ...); 55p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
56 56
57#ifdef CONFIG_NET_9P_DEBUG 57#ifdef CONFIG_NET_9P_DEBUG
58void 58void
@@ -144,7 +144,8 @@ pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
144*/ 144*/
145 145
146static int 146static int
147p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) 147p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
148 va_list ap)
148{ 149{
149 const char *ptr; 150 const char *ptr;
150 int errcode = 0; 151 int errcode = 0;
@@ -194,7 +195,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
194 int16_t len; 195 int16_t len;
195 int size; 196 int size;
196 197
197 errcode = p9pdu_readf(pdu, optional, "w", &len); 198 errcode = p9pdu_readf(pdu, proto_version,
199 "w", &len);
198 if (errcode) 200 if (errcode)
199 break; 201 break;
200 202
@@ -217,7 +219,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
217 struct p9_qid *qid = 219 struct p9_qid *qid =
218 va_arg(ap, struct p9_qid *); 220 va_arg(ap, struct p9_qid *);
219 221
220 errcode = p9pdu_readf(pdu, optional, "bdq", 222 errcode = p9pdu_readf(pdu, proto_version, "bdq",
221 &qid->type, &qid->version, 223 &qid->type, &qid->version,
222 &qid->path); 224 &qid->path);
223 } 225 }
@@ -230,7 +232,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
230 stbuf->n_uid = stbuf->n_gid = stbuf->n_muid = 232 stbuf->n_uid = stbuf->n_gid = stbuf->n_muid =
231 -1; 233 -1;
232 errcode = 234 errcode =
233 p9pdu_readf(pdu, optional, 235 p9pdu_readf(pdu, proto_version,
234 "wwdQdddqssss?sddd", 236 "wwdQdddqssss?sddd",
235 &stbuf->size, &stbuf->type, 237 &stbuf->size, &stbuf->type,
236 &stbuf->dev, &stbuf->qid, 238 &stbuf->dev, &stbuf->qid,
@@ -250,7 +252,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
250 void **data = va_arg(ap, void **); 252 void **data = va_arg(ap, void **);
251 253
252 errcode = 254 errcode =
253 p9pdu_readf(pdu, optional, "d", count); 255 p9pdu_readf(pdu, proto_version, "d", count);
254 if (!errcode) { 256 if (!errcode) {
255 *count = 257 *count =
256 MIN(*count, 258 MIN(*count,
@@ -263,8 +265,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
263 int16_t *nwname = va_arg(ap, int16_t *); 265 int16_t *nwname = va_arg(ap, int16_t *);
264 char ***wnames = va_arg(ap, char ***); 266 char ***wnames = va_arg(ap, char ***);
265 267
266 errcode = 268 errcode = p9pdu_readf(pdu, proto_version,
267 p9pdu_readf(pdu, optional, "w", nwname); 269 "w", nwname);
268 if (!errcode) { 270 if (!errcode) {
269 *wnames = 271 *wnames =
270 kmalloc(sizeof(char *) * *nwname, 272 kmalloc(sizeof(char *) * *nwname,
@@ -278,7 +280,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
278 280
279 for (i = 0; i < *nwname; i++) { 281 for (i = 0; i < *nwname; i++) {
280 errcode = 282 errcode =
281 p9pdu_readf(pdu, optional, 283 p9pdu_readf(pdu,
284 proto_version,
282 "s", 285 "s",
283 &(*wnames)[i]); 286 &(*wnames)[i]);
284 if (errcode) 287 if (errcode)
@@ -306,7 +309,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
306 *wqids = NULL; 309 *wqids = NULL;
307 310
308 errcode = 311 errcode =
309 p9pdu_readf(pdu, optional, "w", nwqid); 312 p9pdu_readf(pdu, proto_version, "w", nwqid);
310 if (!errcode) { 313 if (!errcode) {
311 *wqids = 314 *wqids =
312 kmalloc(*nwqid * 315 kmalloc(*nwqid *
@@ -321,7 +324,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
321 324
322 for (i = 0; i < *nwqid; i++) { 325 for (i = 0; i < *nwqid; i++) {
323 errcode = 326 errcode =
324 p9pdu_readf(pdu, optional, 327 p9pdu_readf(pdu,
328 proto_version,
325 "Q", 329 "Q",
326 &(*wqids)[i]); 330 &(*wqids)[i]);
327 if (errcode) 331 if (errcode)
@@ -336,7 +340,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
336 } 340 }
337 break; 341 break;
338 case '?': 342 case '?':
339 if (!optional) 343 if (proto_version != p9_proto_2000u)
340 return 0; 344 return 0;
341 break; 345 break;
342 default: 346 default:
@@ -352,7 +356,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
352} 356}
353 357
354int 358int
355p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap) 359p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
360 va_list ap)
356{ 361{
357 const char *ptr; 362 const char *ptr;
358 int errcode = 0; 363 int errcode = 0;
@@ -389,7 +394,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
389 if (sptr) 394 if (sptr)
390 len = MIN(strlen(sptr), USHORT_MAX); 395 len = MIN(strlen(sptr), USHORT_MAX);
391 396
392 errcode = p9pdu_writef(pdu, optional, "w", len); 397 errcode = p9pdu_writef(pdu, proto_version,
398 "w", len);
393 if (!errcode && pdu_write(pdu, sptr, len)) 399 if (!errcode && pdu_write(pdu, sptr, len))
394 errcode = -EFAULT; 400 errcode = -EFAULT;
395 } 401 }
@@ -398,7 +404,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
398 const struct p9_qid *qid = 404 const struct p9_qid *qid =
399 va_arg(ap, const struct p9_qid *); 405 va_arg(ap, const struct p9_qid *);
400 errcode = 406 errcode =
401 p9pdu_writef(pdu, optional, "bdq", 407 p9pdu_writef(pdu, proto_version, "bdq",
402 qid->type, qid->version, 408 qid->type, qid->version,
403 qid->path); 409 qid->path);
404 } break; 410 } break;
@@ -406,7 +412,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
406 const struct p9_wstat *stbuf = 412 const struct p9_wstat *stbuf =
407 va_arg(ap, const struct p9_wstat *); 413 va_arg(ap, const struct p9_wstat *);
408 errcode = 414 errcode =
409 p9pdu_writef(pdu, optional, 415 p9pdu_writef(pdu, proto_version,
410 "wwdQdddqssss?sddd", 416 "wwdQdddqssss?sddd",
411 stbuf->size, stbuf->type, 417 stbuf->size, stbuf->type,
412 stbuf->dev, &stbuf->qid, 418 stbuf->dev, &stbuf->qid,
@@ -421,8 +427,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
421 int32_t count = va_arg(ap, int32_t); 427 int32_t count = va_arg(ap, int32_t);
422 const void *data = va_arg(ap, const void *); 428 const void *data = va_arg(ap, const void *);
423 429
424 errcode = 430 errcode = p9pdu_writef(pdu, proto_version, "d",
425 p9pdu_writef(pdu, optional, "d", count); 431 count);
426 if (!errcode && pdu_write(pdu, data, count)) 432 if (!errcode && pdu_write(pdu, data, count))
427 errcode = -EFAULT; 433 errcode = -EFAULT;
428 } 434 }
@@ -431,8 +437,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
431 int32_t count = va_arg(ap, int32_t); 437 int32_t count = va_arg(ap, int32_t);
432 const char __user *udata = 438 const char __user *udata =
433 va_arg(ap, const void __user *); 439 va_arg(ap, const void __user *);
434 errcode = 440 errcode = p9pdu_writef(pdu, proto_version, "d",
435 p9pdu_writef(pdu, optional, "d", count); 441 count);
436 if (!errcode && pdu_write_u(pdu, udata, count)) 442 if (!errcode && pdu_write_u(pdu, udata, count))
437 errcode = -EFAULT; 443 errcode = -EFAULT;
438 } 444 }
@@ -441,14 +447,15 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
441 int16_t nwname = va_arg(ap, int); 447 int16_t nwname = va_arg(ap, int);
442 const char **wnames = va_arg(ap, const char **); 448 const char **wnames = va_arg(ap, const char **);
443 449
444 errcode = 450 errcode = p9pdu_writef(pdu, proto_version, "w",
445 p9pdu_writef(pdu, optional, "w", nwname); 451 nwname);
446 if (!errcode) { 452 if (!errcode) {
447 int i; 453 int i;
448 454
449 for (i = 0; i < nwname; i++) { 455 for (i = 0; i < nwname; i++) {
450 errcode = 456 errcode =
451 p9pdu_writef(pdu, optional, 457 p9pdu_writef(pdu,
458 proto_version,
452 "s", 459 "s",
453 wnames[i]); 460 wnames[i]);
454 if (errcode) 461 if (errcode)
@@ -462,14 +469,15 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
462 struct p9_qid *wqids = 469 struct p9_qid *wqids =
463 va_arg(ap, struct p9_qid *); 470 va_arg(ap, struct p9_qid *);
464 471
465 errcode = 472 errcode = p9pdu_writef(pdu, proto_version, "w",
466 p9pdu_writef(pdu, optional, "w", nwqid); 473 nwqid);
467 if (!errcode) { 474 if (!errcode) {
468 int i; 475 int i;
469 476
470 for (i = 0; i < nwqid; i++) { 477 for (i = 0; i < nwqid; i++) {
471 errcode = 478 errcode =
472 p9pdu_writef(pdu, optional, 479 p9pdu_writef(pdu,
480 proto_version,
473 "Q", 481 "Q",
474 &wqids[i]); 482 &wqids[i]);
475 if (errcode) 483 if (errcode)
@@ -479,7 +487,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
479 } 487 }
480 break; 488 break;
481 case '?': 489 case '?':
482 if (!optional) 490 if (proto_version != p9_proto_2000u)
483 return 0; 491 return 0;
484 break; 492 break;
485 default: 493 default:
@@ -494,32 +502,32 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
494 return errcode; 502 return errcode;
495} 503}
496 504
497int p9pdu_readf(struct p9_fcall *pdu, int optional, const char *fmt, ...) 505int p9pdu_readf(struct p9_fcall *pdu, int proto_version, const char *fmt, ...)
498{ 506{
499 va_list ap; 507 va_list ap;
500 int ret; 508 int ret;
501 509
502 va_start(ap, fmt); 510 va_start(ap, fmt);
503 ret = p9pdu_vreadf(pdu, optional, fmt, ap); 511 ret = p9pdu_vreadf(pdu, proto_version, fmt, ap);
504 va_end(ap); 512 va_end(ap);
505 513
506 return ret; 514 return ret;
507} 515}
508 516
509static int 517static int
510p9pdu_writef(struct p9_fcall *pdu, int optional, const char *fmt, ...) 518p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...)
511{ 519{
512 va_list ap; 520 va_list ap;
513 int ret; 521 int ret;
514 522
515 va_start(ap, fmt); 523 va_start(ap, fmt);
516 ret = p9pdu_vwritef(pdu, optional, fmt, ap); 524 ret = p9pdu_vwritef(pdu, proto_version, fmt, ap);
517 va_end(ap); 525 va_end(ap);
518 526
519 return ret; 527 return ret;
520} 528}
521 529
522int p9stat_read(char *buf, int len, struct p9_wstat *st, int dotu) 530int p9stat_read(char *buf, int len, struct p9_wstat *st, int proto_version)
523{ 531{
524 struct p9_fcall fake_pdu; 532 struct p9_fcall fake_pdu;
525 int ret; 533 int ret;
@@ -529,7 +537,7 @@ int p9stat_read(char *buf, int len, struct p9_wstat *st, int dotu)
529 fake_pdu.sdata = buf; 537 fake_pdu.sdata = buf;
530 fake_pdu.offset = 0; 538 fake_pdu.offset = 0;
531 539
532 ret = p9pdu_readf(&fake_pdu, dotu, "S", st); 540 ret = p9pdu_readf(&fake_pdu, proto_version, "S", st);
533 if (ret) { 541 if (ret) {
534 P9_DPRINTK(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret); 542 P9_DPRINTK(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret);
535 p9pdu_dump(1, &fake_pdu); 543 p9pdu_dump(1, &fake_pdu);
diff --git a/net/9p/protocol.h b/net/9p/protocol.h
index ccde462e7ac5..2431c0f38d56 100644
--- a/net/9p/protocol.h
+++ b/net/9p/protocol.h
@@ -25,9 +25,9 @@
25 * 25 *
26 */ 26 */
27 27
28int 28int p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
29p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap); 29 va_list ap);
30int p9pdu_readf(struct p9_fcall *pdu, int optional, const char *fmt, ...); 30int p9pdu_readf(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
31int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type); 31int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type);
32int p9pdu_finalize(struct p9_fcall *pdu); 32int p9pdu_finalize(struct p9_fcall *pdu);
33void p9pdu_dump(int, struct p9_fcall *); 33void p9pdu_dump(int, struct p9_fcall *);
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 4dd873e3a1bb..31d0b05582a9 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -42,6 +42,8 @@
42#include <net/9p/client.h> 42#include <net/9p/client.h>
43#include <net/9p/transport.h> 43#include <net/9p/transport.h>
44 44
45#include <linux/syscalls.h> /* killme */
46
45#define P9_PORT 564 47#define P9_PORT 564
46#define MAX_SOCK_BUF (64*1024) 48#define MAX_SOCK_BUF (64*1024)
47#define MAXPOLLWADDR 2 49#define MAXPOLLWADDR 2
@@ -712,7 +714,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
712 char *p; 714 char *p;
713 substring_t args[MAX_OPT_ARGS]; 715 substring_t args[MAX_OPT_ARGS];
714 int option; 716 int option;
715 char *options; 717 char *options, *tmp_options;
716 int ret; 718 int ret;
717 719
718 opts->port = P9_PORT; 720 opts->port = P9_PORT;
@@ -722,12 +724,13 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
722 if (!params) 724 if (!params)
723 return 0; 725 return 0;
724 726
725 options = kstrdup(params, GFP_KERNEL); 727 tmp_options = kstrdup(params, GFP_KERNEL);
726 if (!options) { 728 if (!tmp_options) {
727 P9_DPRINTK(P9_DEBUG_ERROR, 729 P9_DPRINTK(P9_DEBUG_ERROR,
728 "failed to allocate copy of option string\n"); 730 "failed to allocate copy of option string\n");
729 return -ENOMEM; 731 return -ENOMEM;
730 } 732 }
733 options = tmp_options;
731 734
732 while ((p = strsep(&options, ",")) != NULL) { 735 while ((p = strsep(&options, ",")) != NULL) {
733 int token; 736 int token;
@@ -758,7 +761,8 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
758 continue; 761 continue;
759 } 762 }
760 } 763 }
761 kfree(options); 764
765 kfree(tmp_options);
762 return 0; 766 return 0;
763} 767}
764 768
@@ -788,24 +792,41 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
788 792
789static int p9_socket_open(struct p9_client *client, struct socket *csocket) 793static int p9_socket_open(struct p9_client *client, struct socket *csocket)
790{ 794{
791 int fd, ret; 795 struct p9_trans_fd *p;
796 int ret, fd;
797
798 p = kmalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
799 if (!p)
800 return -ENOMEM;
792 801
793 csocket->sk->sk_allocation = GFP_NOIO; 802 csocket->sk->sk_allocation = GFP_NOIO;
794 fd = sock_map_fd(csocket, 0); 803 fd = sock_map_fd(csocket, 0);
795 if (fd < 0) { 804 if (fd < 0) {
796 P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to map fd\n"); 805 P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to map fd\n");
806 sock_release(csocket);
807 kfree(p);
797 return fd; 808 return fd;
798 } 809 }
799 810
800 ret = p9_fd_open(client, fd, fd); 811 get_file(csocket->file);
801 if (ret < 0) { 812 get_file(csocket->file);
802 P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to open fd\n"); 813 p->wr = p->rd = csocket->file;
814 client->trans = p;
815 client->status = Connected;
816
817 sys_close(fd); /* still racy */
818
819 p->rd->f_flags |= O_NONBLOCK;
820
821 p->conn = p9_conn_create(client);
822 if (IS_ERR(p->conn)) {
823 ret = PTR_ERR(p->conn);
824 p->conn = NULL;
825 kfree(p);
826 sockfd_put(csocket);
803 sockfd_put(csocket); 827 sockfd_put(csocket);
804 return ret; 828 return ret;
805 } 829 }
806
807 ((struct p9_trans_fd *)client->trans)->rd->f_flags |= O_NONBLOCK;
808
809 return 0; 830 return 0;
810} 831}
811 832
@@ -883,7 +904,6 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
883 struct socket *csocket; 904 struct socket *csocket;
884 struct sockaddr_in sin_server; 905 struct sockaddr_in sin_server;
885 struct p9_fd_opts opts; 906 struct p9_fd_opts opts;
886 struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */
887 907
888 err = parse_opts(args, &opts); 908 err = parse_opts(args, &opts);
889 if (err < 0) 909 if (err < 0)
@@ -897,12 +917,11 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
897 sin_server.sin_family = AF_INET; 917 sin_server.sin_family = AF_INET;
898 sin_server.sin_addr.s_addr = in_aton(addr); 918 sin_server.sin_addr.s_addr = in_aton(addr);
899 sin_server.sin_port = htons(opts.port); 919 sin_server.sin_port = htons(opts.port);
900 sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket); 920 err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket);
901 921
902 if (!csocket) { 922 if (err) {
903 P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n"); 923 P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
904 err = -EIO; 924 return err;
905 goto error;
906 } 925 }
907 926
908 err = csocket->ops->connect(csocket, 927 err = csocket->ops->connect(csocket,
@@ -912,30 +931,11 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
912 P9_EPRINTK(KERN_ERR, 931 P9_EPRINTK(KERN_ERR,
913 "p9_trans_tcp: problem connecting socket to %s\n", 932 "p9_trans_tcp: problem connecting socket to %s\n",
914 addr); 933 addr);
915 goto error;
916 }
917
918 err = p9_socket_open(client, csocket);
919 if (err < 0)
920 goto error;
921
922 p = (struct p9_trans_fd *) client->trans;
923 p->conn = p9_conn_create(client);
924 if (IS_ERR(p->conn)) {
925 err = PTR_ERR(p->conn);
926 p->conn = NULL;
927 goto error;
928 }
929
930 return 0;
931
932error:
933 if (csocket)
934 sock_release(csocket); 934 sock_release(csocket);
935 return err;
936 }
935 937
936 kfree(p); 938 return p9_socket_open(client, csocket);
937
938 return err;
939} 939}
940 940
941static int 941static int
@@ -944,49 +944,33 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
944 int err; 944 int err;
945 struct socket *csocket; 945 struct socket *csocket;
946 struct sockaddr_un sun_server; 946 struct sockaddr_un sun_server;
947 struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */
948 947
949 csocket = NULL; 948 csocket = NULL;
950 949
951 if (strlen(addr) > UNIX_PATH_MAX) { 950 if (strlen(addr) > UNIX_PATH_MAX) {
952 P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n", 951 P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
953 addr); 952 addr);
954 err = -ENAMETOOLONG; 953 return -ENAMETOOLONG;
955 goto error;
956 } 954 }
957 955
958 sun_server.sun_family = PF_UNIX; 956 sun_server.sun_family = PF_UNIX;
959 strcpy(sun_server.sun_path, addr); 957 strcpy(sun_server.sun_path, addr);
960 sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket); 958 err = sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket);
959 if (err < 0) {
960 P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem creating socket\n");
961 return err;
962 }
961 err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server, 963 err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
962 sizeof(struct sockaddr_un) - 1, 0); 964 sizeof(struct sockaddr_un) - 1, 0);
963 if (err < 0) { 965 if (err < 0) {
964 P9_EPRINTK(KERN_ERR, 966 P9_EPRINTK(KERN_ERR,
965 "p9_trans_unix: problem connecting socket: %s: %d\n", 967 "p9_trans_unix: problem connecting socket: %s: %d\n",
966 addr, err); 968 addr, err);
967 goto error;
968 }
969
970 err = p9_socket_open(client, csocket);
971 if (err < 0)
972 goto error;
973
974 p = (struct p9_trans_fd *) client->trans;
975 p->conn = p9_conn_create(client);
976 if (IS_ERR(p->conn)) {
977 err = PTR_ERR(p->conn);
978 p->conn = NULL;
979 goto error;
980 }
981
982 return 0;
983
984error:
985 if (csocket)
986 sock_release(csocket); 969 sock_release(csocket);
970 return err;
971 }
987 972
988 kfree(p); 973 return p9_socket_open(client, csocket);
989 return err;
990} 974}
991 975
992static int 976static int
@@ -994,7 +978,7 @@ p9_fd_create(struct p9_client *client, const char *addr, char *args)
994{ 978{
995 int err; 979 int err;
996 struct p9_fd_opts opts; 980 struct p9_fd_opts opts;
997 struct p9_trans_fd *p = NULL; /* this get allocated in p9_fd_open */ 981 struct p9_trans_fd *p;
998 982
999 parse_opts(args, &opts); 983 parse_opts(args, &opts);
1000 984
@@ -1005,21 +989,19 @@ p9_fd_create(struct p9_client *client, const char *addr, char *args)
1005 989
1006 err = p9_fd_open(client, opts.rfd, opts.wfd); 990 err = p9_fd_open(client, opts.rfd, opts.wfd);
1007 if (err < 0) 991 if (err < 0)
1008 goto error; 992 return err;
1009 993
1010 p = (struct p9_trans_fd *) client->trans; 994 p = (struct p9_trans_fd *) client->trans;
1011 p->conn = p9_conn_create(client); 995 p->conn = p9_conn_create(client);
1012 if (IS_ERR(p->conn)) { 996 if (IS_ERR(p->conn)) {
1013 err = PTR_ERR(p->conn); 997 err = PTR_ERR(p->conn);
1014 p->conn = NULL; 998 p->conn = NULL;
1015 goto error; 999 fput(p->rd);
1000 fput(p->wr);
1001 return err;
1016 } 1002 }
1017 1003
1018 return 0; 1004 return 0;
1019
1020error:
1021 kfree(p);
1022 return err;
1023} 1005}
1024 1006
1025static struct p9_trans_module p9_tcp_trans = { 1007static struct p9_trans_module p9_tcp_trans = {
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 65cb29db03f8..2c95a89c0f46 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -166,7 +166,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
166 char *p; 166 char *p;
167 substring_t args[MAX_OPT_ARGS]; 167 substring_t args[MAX_OPT_ARGS];
168 int option; 168 int option;
169 char *options; 169 char *options, *tmp_options;
170 int ret; 170 int ret;
171 171
172 opts->port = P9_PORT; 172 opts->port = P9_PORT;
@@ -177,12 +177,13 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
177 if (!params) 177 if (!params)
178 return 0; 178 return 0;
179 179
180 options = kstrdup(params, GFP_KERNEL); 180 tmp_options = kstrdup(params, GFP_KERNEL);
181 if (!options) { 181 if (!tmp_options) {
182 P9_DPRINTK(P9_DEBUG_ERROR, 182 P9_DPRINTK(P9_DEBUG_ERROR,
183 "failed to allocate copy of option string\n"); 183 "failed to allocate copy of option string\n");
184 return -ENOMEM; 184 return -ENOMEM;
185 } 185 }
186 options = tmp_options;
186 187
187 while ((p = strsep(&options, ",")) != NULL) { 188 while ((p = strsep(&options, ",")) != NULL) {
188 int token; 189 int token;
@@ -216,7 +217,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
216 } 217 }
217 /* RQ must be at least as large as the SQ */ 218 /* RQ must be at least as large as the SQ */
218 opts->rq_depth = max(opts->rq_depth, opts->sq_depth); 219 opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
219 kfree(options); 220 kfree(tmp_options);
220 return 0; 221 return 0;
221} 222}
222 223
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index ea1e3daabefe..afde1a89fbb3 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -49,8 +49,6 @@
49 49
50/* a single mutex to manage channel initialization and attachment */ 50/* a single mutex to manage channel initialization and attachment */
51static DEFINE_MUTEX(virtio_9p_lock); 51static DEFINE_MUTEX(virtio_9p_lock);
52/* global which tracks highest initialized channel */
53static int chan_index;
54 52
55/** 53/**
56 * struct virtio_chan - per-instance transport information 54 * struct virtio_chan - per-instance transport information
@@ -68,8 +66,7 @@ static int chan_index;
68 * 66 *
69 */ 67 */
70 68
71static struct virtio_chan { 69struct virtio_chan {
72 bool initialized;
73 bool inuse; 70 bool inuse;
74 71
75 spinlock_t lock; 72 spinlock_t lock;
@@ -80,7 +77,17 @@ static struct virtio_chan {
80 77
81 /* Scatterlist: can be too big for stack. */ 78 /* Scatterlist: can be too big for stack. */
82 struct scatterlist sg[VIRTQUEUE_NUM]; 79 struct scatterlist sg[VIRTQUEUE_NUM];
83} channels[MAX_9P_CHAN]; 80
81 int tag_len;
82 /*
83 * tag name to identify a mount Non-null terminated
84 */
85 char *tag;
86
87 struct list_head chan_list;
88};
89
90static struct list_head virtio_chan_list;
84 91
85/* How many bytes left in this page. */ 92/* How many bytes left in this page. */
86static unsigned int rest_of_page(void *data) 93static unsigned int rest_of_page(void *data)
@@ -102,7 +109,8 @@ static void p9_virtio_close(struct p9_client *client)
102 struct virtio_chan *chan = client->trans; 109 struct virtio_chan *chan = client->trans;
103 110
104 mutex_lock(&virtio_9p_lock); 111 mutex_lock(&virtio_9p_lock);
105 chan->inuse = false; 112 if (chan)
113 chan->inuse = false;
106 mutex_unlock(&virtio_9p_lock); 114 mutex_unlock(&virtio_9p_lock);
107} 115}
108 116
@@ -212,30 +220,38 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
212 return 0; 220 return 0;
213} 221}
214 222
223static ssize_t p9_mount_tag_show(struct device *dev,
224 struct device_attribute *attr, char *buf)
225{
226 struct virtio_chan *chan;
227 struct virtio_device *vdev;
228
229 vdev = dev_to_virtio(dev);
230 chan = vdev->priv;
231
232 return snprintf(buf, chan->tag_len + 1, "%s", chan->tag);
233}
234
235static DEVICE_ATTR(mount_tag, 0444, p9_mount_tag_show, NULL);
236
215/** 237/**
216 * p9_virtio_probe - probe for existence of 9P virtio channels 238 * p9_virtio_probe - probe for existence of 9P virtio channels
217 * @vdev: virtio device to probe 239 * @vdev: virtio device to probe
218 * 240 *
219 * This probes for existing virtio channels. At present only 241 * This probes for existing virtio channels.
220 * a single channel is in use, so in the future more work may need
221 * to be done here.
222 * 242 *
223 */ 243 */
224 244
225static int p9_virtio_probe(struct virtio_device *vdev) 245static int p9_virtio_probe(struct virtio_device *vdev)
226{ 246{
247 __u16 tag_len;
248 char *tag;
227 int err; 249 int err;
228 struct virtio_chan *chan; 250 struct virtio_chan *chan;
229 int index;
230 251
231 mutex_lock(&virtio_9p_lock); 252 chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL);
232 index = chan_index++; 253 if (!chan) {
233 chan = &channels[index]; 254 printk(KERN_ERR "9p: Failed to allocate virtio 9P channel\n");
234 mutex_unlock(&virtio_9p_lock);
235
236 if (chan_index > MAX_9P_CHAN) {
237 printk(KERN_ERR "9p: virtio: Maximum channels exceeded\n");
238 BUG();
239 err = -ENOMEM; 255 err = -ENOMEM;
240 goto fail; 256 goto fail;
241 } 257 }
@@ -254,15 +270,37 @@ static int p9_virtio_probe(struct virtio_device *vdev)
254 sg_init_table(chan->sg, VIRTQUEUE_NUM); 270 sg_init_table(chan->sg, VIRTQUEUE_NUM);
255 271
256 chan->inuse = false; 272 chan->inuse = false;
257 chan->initialized = true; 273 if (virtio_has_feature(vdev, VIRTIO_9P_MOUNT_TAG)) {
274 vdev->config->get(vdev,
275 offsetof(struct virtio_9p_config, tag_len),
276 &tag_len, sizeof(tag_len));
277 } else {
278 err = -EINVAL;
279 goto out_free_vq;
280 }
281 tag = kmalloc(tag_len, GFP_KERNEL);
282 if (!tag) {
283 err = -ENOMEM;
284 goto out_free_vq;
285 }
286 vdev->config->get(vdev, offsetof(struct virtio_9p_config, tag),
287 tag, tag_len);
288 chan->tag = tag;
289 chan->tag_len = tag_len;
290 err = sysfs_create_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
291 if (err) {
292 kfree(tag);
293 goto out_free_vq;
294 }
295 mutex_lock(&virtio_9p_lock);
296 list_add_tail(&chan->chan_list, &virtio_chan_list);
297 mutex_unlock(&virtio_9p_lock);
258 return 0; 298 return 0;
259 299
260out_free_vq: 300out_free_vq:
261 vdev->config->del_vqs(vdev); 301 vdev->config->del_vqs(vdev);
302 kfree(chan);
262fail: 303fail:
263 mutex_lock(&virtio_9p_lock);
264 chan_index--;
265 mutex_unlock(&virtio_9p_lock);
266 return err; 304 return err;
267} 305}
268 306
@@ -279,38 +317,35 @@ fail:
279 * We use a simple reference count mechanism to ensure that only a single 317 * We use a simple reference count mechanism to ensure that only a single
280 * mount has a channel open at a time. 318 * mount has a channel open at a time.
281 * 319 *
282 * Bugs: doesn't allow identification of a specific channel
283 * to allocate, channels are allocated sequentially. This was
284 * a pragmatic decision to get things rolling, but ideally some
285 * way of identifying the channel to attach to would be nice
286 * if we are going to support multiple channels.
287 *
288 */ 320 */
289 321
290static int 322static int
291p9_virtio_create(struct p9_client *client, const char *devname, char *args) 323p9_virtio_create(struct p9_client *client, const char *devname, char *args)
292{ 324{
293 struct virtio_chan *chan = channels; 325 struct virtio_chan *chan;
294 int index = 0; 326 int ret = -ENOENT;
327 int found = 0;
295 328
296 mutex_lock(&virtio_9p_lock); 329 mutex_lock(&virtio_9p_lock);
297 while (index < MAX_9P_CHAN) { 330 list_for_each_entry(chan, &virtio_chan_list, chan_list) {
298 if (chan->initialized && !chan->inuse) { 331 if (!strncmp(devname, chan->tag, chan->tag_len)) {
299 chan->inuse = true; 332 if (!chan->inuse) {
300 break; 333 chan->inuse = true;
301 } else { 334 found = 1;
302 index++; 335 break;
303 chan = &channels[index]; 336 }
337 ret = -EBUSY;
304 } 338 }
305 } 339 }
306 mutex_unlock(&virtio_9p_lock); 340 mutex_unlock(&virtio_9p_lock);
307 341
308 if (index >= MAX_9P_CHAN) { 342 if (!found) {
309 printk(KERN_ERR "9p: no channels available\n"); 343 printk(KERN_ERR "9p: no channels available\n");
310 return -ENODEV; 344 return ret;
311 } 345 }
312 346
313 client->trans = (void *)chan; 347 client->trans = (void *)chan;
348 client->status = Connected;
314 chan->client = client; 349 chan->client = client;
315 350
316 return 0; 351 return 0;
@@ -327,11 +362,15 @@ static void p9_virtio_remove(struct virtio_device *vdev)
327 struct virtio_chan *chan = vdev->priv; 362 struct virtio_chan *chan = vdev->priv;
328 363
329 BUG_ON(chan->inuse); 364 BUG_ON(chan->inuse);
365 vdev->config->del_vqs(vdev);
366
367 mutex_lock(&virtio_9p_lock);
368 list_del(&chan->chan_list);
369 mutex_unlock(&virtio_9p_lock);
370 sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
371 kfree(chan->tag);
372 kfree(chan);
330 373
331 if (chan->initialized) {
332 vdev->config->del_vqs(vdev);
333 chan->initialized = false;
334 }
335} 374}
336 375
337static struct virtio_device_id id_table[] = { 376static struct virtio_device_id id_table[] = {
@@ -339,13 +378,19 @@ static struct virtio_device_id id_table[] = {
339 { 0 }, 378 { 0 },
340}; 379};
341 380
381static unsigned int features[] = {
382 VIRTIO_9P_MOUNT_TAG,
383};
384
342/* The standard "struct lguest_driver": */ 385/* The standard "struct lguest_driver": */
343static struct virtio_driver p9_virtio_drv = { 386static struct virtio_driver p9_virtio_drv = {
344 .driver.name = KBUILD_MODNAME, 387 .feature_table = features,
345 .driver.owner = THIS_MODULE, 388 .feature_table_size = ARRAY_SIZE(features),
346 .id_table = id_table, 389 .driver.name = KBUILD_MODNAME,
347 .probe = p9_virtio_probe, 390 .driver.owner = THIS_MODULE,
348 .remove = p9_virtio_remove, 391 .id_table = id_table,
392 .probe = p9_virtio_probe,
393 .remove = p9_virtio_remove,
349}; 394};
350 395
351static struct p9_trans_module p9_virtio_trans = { 396static struct p9_trans_module p9_virtio_trans = {
@@ -362,10 +407,7 @@ static struct p9_trans_module p9_virtio_trans = {
362/* The standard init function */ 407/* The standard init function */
363static int __init p9_virtio_init(void) 408static int __init p9_virtio_init(void)
364{ 409{
365 int count; 410 INIT_LIST_HEAD(&virtio_chan_list);
366
367 for (count = 0; count < MAX_9P_CHAN; count++)
368 channels[count].initialized = false;
369 411
370 v9fs_register_trans(&p9_virtio_trans); 412 v9fs_register_trans(&p9_virtio_trans);
371 return register_virtio_driver(&p9_virtio_drv); 413 return register_virtio_driver(&p9_virtio_drv);
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 9d4adfd22757..f2b3b56aa779 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -819,7 +819,7 @@ static int aarp_rcv(struct sk_buff *skb, struct net_device *dev,
819 ma = &ifa->address; 819 ma = &ifa->address;
820 else { /* We need to make a copy of the entry. */ 820 else { /* We need to make a copy of the entry. */
821 da.s_node = sa.s_node; 821 da.s_node = sa.s_node;
822 da.s_net = da.s_net; 822 da.s_net = sa.s_net;
823 ma = &da; 823 ma = &da;
824 } 824 }
825 825
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index 80caad1a31a5..6ef0e761e5de 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -144,40 +144,16 @@ out:
144 return 0; 144 return 0;
145} 145}
146 146
147static __inline__ struct sock *atalk_get_socket_idx(loff_t pos)
148{
149 struct sock *s;
150 struct hlist_node *node;
151
152 sk_for_each(s, node, &atalk_sockets)
153 if (!pos--)
154 goto found;
155 s = NULL;
156found:
157 return s;
158}
159
160static void *atalk_seq_socket_start(struct seq_file *seq, loff_t *pos) 147static void *atalk_seq_socket_start(struct seq_file *seq, loff_t *pos)
161 __acquires(atalk_sockets_lock) 148 __acquires(atalk_sockets_lock)
162{ 149{
163 loff_t l = *pos;
164
165 read_lock_bh(&atalk_sockets_lock); 150 read_lock_bh(&atalk_sockets_lock);
166 return l ? atalk_get_socket_idx(--l) : SEQ_START_TOKEN; 151 return seq_hlist_start_head(&atalk_sockets, *pos);
167} 152}
168 153
169static void *atalk_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos) 154static void *atalk_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
170{ 155{
171 struct sock *i; 156 return seq_hlist_next(v, &atalk_sockets, pos);
172
173 ++*pos;
174 if (v == SEQ_START_TOKEN) {
175 i = sk_head(&atalk_sockets);
176 goto out;
177 }
178 i = sk_next(v);
179out:
180 return i;
181} 157}
182 158
183static void atalk_seq_socket_stop(struct seq_file *seq, void *v) 159static void atalk_seq_socket_stop(struct seq_file *seq, void *v)
@@ -197,7 +173,7 @@ static int atalk_seq_socket_show(struct seq_file *seq, void *v)
197 goto out; 173 goto out;
198 } 174 }
199 175
200 s = v; 176 s = sk_entry(v);
201 at = at_sk(s); 177 at = at_sk(s);
202 178
203 seq_printf(seq, "%02X %04X:%02X:%02X %04X:%02X:%02X %08X:%08X " 179 seq_printf(seq, "%02X %04X:%02X:%02X %04X:%02X:%02X %08X:%08X "
diff --git a/net/atm/addr.c b/net/atm/addr.c
index 82e85abc303d..cf3ae8b47572 100644
--- a/net/atm/addr.c
+++ b/net/atm/addr.c
@@ -4,7 +4,7 @@
4 4
5#include <linux/atm.h> 5#include <linux/atm.h>
6#include <linux/atmdev.h> 6#include <linux/atmdev.h>
7#include <asm/uaccess.h> 7#include <linux/uaccess.h>
8 8
9#include "signaling.h" 9#include "signaling.h"
10#include "addr.h" 10#include "addr.h"
diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
index 02cc7e71efea..fc63526d8695 100644
--- a/net/atm/atm_misc.c
+++ b/net/atm/atm_misc.c
@@ -2,37 +2,35 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL ICA */
4 4
5
6#include <linux/module.h> 5#include <linux/module.h>
7#include <linux/atm.h> 6#include <linux/atm.h>
8#include <linux/atmdev.h> 7#include <linux/atmdev.h>
9#include <linux/skbuff.h> 8#include <linux/skbuff.h>
10#include <linux/sonet.h> 9#include <linux/sonet.h>
11#include <linux/bitops.h> 10#include <linux/bitops.h>
11#include <linux/errno.h>
12#include <asm/atomic.h> 12#include <asm/atomic.h>
13#include <asm/errno.h>
14
15 13
16int atm_charge(struct atm_vcc *vcc,int truesize) 14int atm_charge(struct atm_vcc *vcc, int truesize)
17{ 15{
18 atm_force_charge(vcc,truesize); 16 atm_force_charge(vcc, truesize);
19 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf) 17 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
20 return 1; 18 return 1;
21 atm_return(vcc,truesize); 19 atm_return(vcc, truesize);
22 atomic_inc(&vcc->stats->rx_drop); 20 atomic_inc(&vcc->stats->rx_drop);
23 return 0; 21 return 0;
24} 22}
23EXPORT_SYMBOL(atm_charge);
25 24
26 25struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
27struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, 26 gfp_t gfp_flags)
28 gfp_t gfp_flags)
29{ 27{
30 struct sock *sk = sk_atm(vcc); 28 struct sock *sk = sk_atm(vcc);
31 int guess = atm_guess_pdu2truesize(pdu_size); 29 int guess = atm_guess_pdu2truesize(pdu_size);
32 30
33 atm_force_charge(vcc,guess); 31 atm_force_charge(vcc, guess);
34 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { 32 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
35 struct sk_buff *skb = alloc_skb(pdu_size,gfp_flags); 33 struct sk_buff *skb = alloc_skb(pdu_size, gfp_flags);
36 34
37 if (skb) { 35 if (skb) {
38 atomic_add(skb->truesize-guess, 36 atomic_add(skb->truesize-guess,
@@ -40,10 +38,11 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
40 return skb; 38 return skb;
41 } 39 }
42 } 40 }
43 atm_return(vcc,guess); 41 atm_return(vcc, guess);
44 atomic_inc(&vcc->stats->rx_drop); 42 atomic_inc(&vcc->stats->rx_drop);
45 return NULL; 43 return NULL;
46} 44}
45EXPORT_SYMBOL(atm_alloc_charge);
47 46
48 47
49/* 48/*
@@ -73,7 +72,6 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
73 * else * 72 * else *
74 */ 73 */
75 74
76
77int atm_pcr_goal(const struct atm_trafprm *tp) 75int atm_pcr_goal(const struct atm_trafprm *tp)
78{ 76{
79 if (tp->pcr && tp->pcr != ATM_MAX_PCR) 77 if (tp->pcr && tp->pcr != ATM_MAX_PCR)
@@ -84,26 +82,20 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
84 return -tp->max_pcr; 82 return -tp->max_pcr;
85 return 0; 83 return 0;
86} 84}
85EXPORT_SYMBOL(atm_pcr_goal);
87 86
88 87void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
89void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
90{ 88{
91#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i) 89#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
92 __SONET_ITEMS 90 __SONET_ITEMS
93#undef __HANDLE_ITEM 91#undef __HANDLE_ITEM
94} 92}
93EXPORT_SYMBOL(sonet_copy_stats);
95 94
96 95void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
97void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
98{ 96{
99#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i) 97#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100 __SONET_ITEMS 98 __SONET_ITEMS
101#undef __HANDLE_ITEM 99#undef __HANDLE_ITEM
102} 100}
103
104
105EXPORT_SYMBOL(atm_charge);
106EXPORT_SYMBOL(atm_alloc_charge);
107EXPORT_SYMBOL(atm_pcr_goal);
108EXPORT_SYMBOL(sonet_copy_stats);
109EXPORT_SYMBOL(sonet_subtract_stats); 101EXPORT_SYMBOL(sonet_subtract_stats);
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index b5674dc2083d..f693b78eb467 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -42,13 +42,14 @@ static ssize_t show_atmaddress(struct device *cdev,
42 42
43 spin_lock_irqsave(&adev->lock, flags); 43 spin_lock_irqsave(&adev->lock, flags);
44 list_for_each_entry(aaddr, &adev->local, entry) { 44 list_for_each_entry(aaddr, &adev->local, entry) {
45 for(i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) { 45 for (i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) {
46 if (j == *fmt) { 46 if (j == *fmt) {
47 pos += sprintf(pos, "."); 47 pos += sprintf(pos, ".");
48 ++fmt; 48 ++fmt;
49 j = 0; 49 j = 0;
50 } 50 }
51 pos += sprintf(pos, "%02x", aaddr->addr.sas_addr.prv[i]); 51 pos += sprintf(pos, "%02x",
52 aaddr->addr.sas_addr.prv[i]);
52 } 53 }
53 pos += sprintf(pos, "\n"); 54 pos += sprintf(pos, "\n");
54 } 55 }
@@ -78,17 +79,17 @@ static ssize_t show_link_rate(struct device *cdev,
78 79
79 /* show the link rate, not the data rate */ 80 /* show the link rate, not the data rate */
80 switch (adev->link_rate) { 81 switch (adev->link_rate) {
81 case ATM_OC3_PCR: 82 case ATM_OC3_PCR:
82 link_rate = 155520000; 83 link_rate = 155520000;
83 break; 84 break;
84 case ATM_OC12_PCR: 85 case ATM_OC12_PCR:
85 link_rate = 622080000; 86 link_rate = 622080000;
86 break; 87 break;
87 case ATM_25_PCR: 88 case ATM_25_PCR:
88 link_rate = 25600000; 89 link_rate = 25600000;
89 break; 90 break;
90 default: 91 default:
91 link_rate = adev->link_rate * 8 * 53; 92 link_rate = adev->link_rate * 8 * 53;
92 } 93 }
93 pos += sprintf(pos, "%d\n", link_rate); 94 pos += sprintf(pos, "%d\n", link_rate);
94 95
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index c9230c398697..4d64d87e7578 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -6,6 +6,8 @@
6 * Eric Kinzie, 2006-2007, US Naval Research Laboratory 6 * Eric Kinzie, 2006-2007, US Naval Research Laboratory
7 */ 7 */
8 8
9#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
10
9#include <linux/module.h> 11#include <linux/module.h>
10#include <linux/init.h> 12#include <linux/init.h>
11#include <linux/kernel.h> 13#include <linux/kernel.h>
@@ -15,7 +17,7 @@
15#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
16#include <linux/rtnetlink.h> 18#include <linux/rtnetlink.h>
17#include <linux/ip.h> 19#include <linux/ip.h>
18#include <asm/uaccess.h> 20#include <linux/uaccess.h>
19#include <net/arp.h> 21#include <net/arp.h>
20#include <linux/atm.h> 22#include <linux/atm.h>
21#include <linux/atmdev.h> 23#include <linux/atmdev.h>
@@ -26,20 +28,14 @@
26 28
27#include "common.h" 29#include "common.h"
28 30
29#ifdef SKB_DEBUG
30static void skb_debug(const struct sk_buff *skb) 31static void skb_debug(const struct sk_buff *skb)
31{ 32{
33#ifdef SKB_DEBUG
32#define NUM2PRINT 50 34#define NUM2PRINT 50
33 char buf[NUM2PRINT * 3 + 1]; /* 3 chars per byte */ 35 print_hex_dump(KERN_DEBUG, "br2684: skb: ", DUMP_OFFSET,
34 int i = 0; 36 16, 1, skb->data, min(NUM2PRINT, skb->len), true);
35 for (i = 0; i < skb->len && i < NUM2PRINT; i++) {
36 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
37 }
38 printk(KERN_DEBUG "br2684: skb: %s\n", buf);
39}
40#else
41#define skb_debug(skb) do {} while (0)
42#endif 37#endif
38}
43 39
44#define BR2684_ETHERTYPE_LEN 2 40#define BR2684_ETHERTYPE_LEN 2
45#define BR2684_PAD_LEN 2 41#define BR2684_PAD_LEN 2
@@ -68,7 +64,7 @@ struct br2684_vcc {
68 struct atm_vcc *atmvcc; 64 struct atm_vcc *atmvcc;
69 struct net_device *device; 65 struct net_device *device;
70 /* keep old push, pop functions for chaining */ 66 /* keep old push, pop functions for chaining */
71 void (*old_push) (struct atm_vcc * vcc, struct sk_buff * skb); 67 void (*old_push)(struct atm_vcc *vcc, struct sk_buff *skb);
72 void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb); 68 void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb);
73 enum br2684_encaps encaps; 69 enum br2684_encaps encaps;
74 struct list_head brvccs; 70 struct list_head brvccs;
@@ -148,7 +144,7 @@ static void br2684_pop(struct atm_vcc *vcc, struct sk_buff *skb)
148 struct br2684_vcc *brvcc = BR2684_VCC(vcc); 144 struct br2684_vcc *brvcc = BR2684_VCC(vcc);
149 struct net_device *net_dev = skb->dev; 145 struct net_device *net_dev = skb->dev;
150 146
151 pr_debug("br2684_pop(vcc %p ; net_dev %p )\n", vcc, net_dev); 147 pr_debug("(vcc %p ; net_dev %p )\n", vcc, net_dev);
152 brvcc->old_pop(vcc, skb); 148 brvcc->old_pop(vcc, skb);
153 149
154 if (!net_dev) 150 if (!net_dev)
@@ -244,7 +240,7 @@ static netdev_tx_t br2684_start_xmit(struct sk_buff *skb,
244 struct br2684_dev *brdev = BRPRIV(dev); 240 struct br2684_dev *brdev = BRPRIV(dev);
245 struct br2684_vcc *brvcc; 241 struct br2684_vcc *brvcc;
246 242
247 pr_debug("br2684_start_xmit, skb_dst(skb)=%p\n", skb_dst(skb)); 243 pr_debug("skb_dst(skb)=%p\n", skb_dst(skb));
248 read_lock(&devs_lock); 244 read_lock(&devs_lock);
249 brvcc = pick_outgoing_vcc(skb, brdev); 245 brvcc = pick_outgoing_vcc(skb, brdev);
250 if (brvcc == NULL) { 246 if (brvcc == NULL) {
@@ -300,7 +296,8 @@ static int br2684_setfilt(struct atm_vcc *atmvcc, void __user * arg)
300 struct br2684_dev *brdev; 296 struct br2684_dev *brdev;
301 read_lock(&devs_lock); 297 read_lock(&devs_lock);
302 brdev = BRPRIV(br2684_find_dev(&fs.ifspec)); 298 brdev = BRPRIV(br2684_find_dev(&fs.ifspec));
303 if (brdev == NULL || list_empty(&brdev->brvccs) || brdev->brvccs.next != brdev->brvccs.prev) /* >1 VCC */ 299 if (brdev == NULL || list_empty(&brdev->brvccs) ||
300 brdev->brvccs.next != brdev->brvccs.prev) /* >1 VCC */
304 brvcc = NULL; 301 brvcc = NULL;
305 else 302 else
306 brvcc = list_entry_brvcc(brdev->brvccs.next); 303 brvcc = list_entry_brvcc(brdev->brvccs.next);
@@ -352,7 +349,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
352 struct net_device *net_dev = brvcc->device; 349 struct net_device *net_dev = brvcc->device;
353 struct br2684_dev *brdev = BRPRIV(net_dev); 350 struct br2684_dev *brdev = BRPRIV(net_dev);
354 351
355 pr_debug("br2684_push\n"); 352 pr_debug("\n");
356 353
357 if (unlikely(skb == NULL)) { 354 if (unlikely(skb == NULL)) {
358 /* skb==NULL means VCC is being destroyed */ 355 /* skb==NULL means VCC is being destroyed */
@@ -376,29 +373,25 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
376 __skb_trim(skb, skb->len - 4); 373 __skb_trim(skb, skb->len - 4);
377 374
378 /* accept packets that have "ipv[46]" in the snap header */ 375 /* accept packets that have "ipv[46]" in the snap header */
379 if ((skb->len >= (sizeof(llc_oui_ipv4))) 376 if ((skb->len >= (sizeof(llc_oui_ipv4))) &&
380 && 377 (memcmp(skb->data, llc_oui_ipv4,
381 (memcmp 378 sizeof(llc_oui_ipv4) - BR2684_ETHERTYPE_LEN) == 0)) {
382 (skb->data, llc_oui_ipv4, 379 if (memcmp(skb->data + 6, ethertype_ipv6,
383 sizeof(llc_oui_ipv4) - BR2684_ETHERTYPE_LEN) == 0)) { 380 sizeof(ethertype_ipv6)) == 0)
384 if (memcmp
385 (skb->data + 6, ethertype_ipv6,
386 sizeof(ethertype_ipv6)) == 0)
387 skb->protocol = htons(ETH_P_IPV6); 381 skb->protocol = htons(ETH_P_IPV6);
388 else if (memcmp 382 else if (memcmp(skb->data + 6, ethertype_ipv4,
389 (skb->data + 6, ethertype_ipv4, 383 sizeof(ethertype_ipv4)) == 0)
390 sizeof(ethertype_ipv4)) == 0)
391 skb->protocol = htons(ETH_P_IP); 384 skb->protocol = htons(ETH_P_IP);
392 else 385 else
393 goto error; 386 goto error;
394 skb_pull(skb, sizeof(llc_oui_ipv4)); 387 skb_pull(skb, sizeof(llc_oui_ipv4));
395 skb_reset_network_header(skb); 388 skb_reset_network_header(skb);
396 skb->pkt_type = PACKET_HOST; 389 skb->pkt_type = PACKET_HOST;
397 /* 390 /*
398 * Let us waste some time for checking the encapsulation. 391 * Let us waste some time for checking the encapsulation.
399 * Note, that only 7 char is checked so frames with a valid FCS 392 * Note, that only 7 char is checked so frames with a valid FCS
400 * are also accepted (but FCS is not checked of course). 393 * are also accepted (but FCS is not checked of course).
401 */ 394 */
402 } else if ((skb->len >= sizeof(llc_oui_pid_pad)) && 395 } else if ((skb->len >= sizeof(llc_oui_pid_pad)) &&
403 (memcmp(skb->data, llc_oui_pid_pad, 7) == 0)) { 396 (memcmp(skb->data, llc_oui_pid_pad, 7) == 0)) {
404 skb_pull(skb, sizeof(llc_oui_pid_pad)); 397 skb_pull(skb, sizeof(llc_oui_pid_pad));
@@ -479,8 +472,7 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
479 write_lock_irq(&devs_lock); 472 write_lock_irq(&devs_lock);
480 net_dev = br2684_find_dev(&be.ifspec); 473 net_dev = br2684_find_dev(&be.ifspec);
481 if (net_dev == NULL) { 474 if (net_dev == NULL) {
482 printk(KERN_ERR 475 pr_err("tried to attach to non-existant device\n");
483 "br2684: tried to attach to non-existant device\n");
484 err = -ENXIO; 476 err = -ENXIO;
485 goto error; 477 goto error;
486 } 478 }
@@ -494,17 +486,16 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
494 err = -EEXIST; 486 err = -EEXIST;
495 goto error; 487 goto error;
496 } 488 }
497 if (be.fcs_in != BR2684_FCSIN_NO || be.fcs_out != BR2684_FCSOUT_NO || 489 if (be.fcs_in != BR2684_FCSIN_NO ||
498 be.fcs_auto || be.has_vpiid || be.send_padding || (be.encaps != 490 be.fcs_out != BR2684_FCSOUT_NO ||
499 BR2684_ENCAPS_VC 491 be.fcs_auto || be.has_vpiid || be.send_padding ||
500 && be.encaps != 492 (be.encaps != BR2684_ENCAPS_VC &&
501 BR2684_ENCAPS_LLC) 493 be.encaps != BR2684_ENCAPS_LLC) ||
502 || be.min_size != 0) { 494 be.min_size != 0) {
503 err = -EINVAL; 495 err = -EINVAL;
504 goto error; 496 goto error;
505 } 497 }
506 pr_debug("br2684_regvcc vcc=%p, encaps=%d, brvcc=%p\n", atmvcc, 498 pr_debug("vcc=%p, encaps=%d, brvcc=%p\n", atmvcc, be.encaps, brvcc);
507 be.encaps, brvcc);
508 if (list_empty(&brdev->brvccs) && !brdev->mac_was_set) { 499 if (list_empty(&brdev->brvccs) && !brdev->mac_was_set) {
509 unsigned char *esi = atmvcc->dev->esi; 500 unsigned char *esi = atmvcc->dev->esi;
510 if (esi[0] | esi[1] | esi[2] | esi[3] | esi[4] | esi[5]) 501 if (esi[0] | esi[1] | esi[2] | esi[3] | esi[4] | esi[5])
@@ -541,7 +532,8 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
541 } 532 }
542 __module_get(THIS_MODULE); 533 __module_get(THIS_MODULE);
543 return 0; 534 return 0;
544 error: 535
536error:
545 write_unlock_irq(&devs_lock); 537 write_unlock_irq(&devs_lock);
546 kfree(brvcc); 538 kfree(brvcc);
547 return err; 539 return err;
@@ -587,7 +579,7 @@ static void br2684_setup_routed(struct net_device *netdev)
587 INIT_LIST_HEAD(&brdev->brvccs); 579 INIT_LIST_HEAD(&brdev->brvccs);
588} 580}
589 581
590static int br2684_create(void __user * arg) 582static int br2684_create(void __user *arg)
591{ 583{
592 int err; 584 int err;
593 struct net_device *netdev; 585 struct net_device *netdev;
@@ -595,11 +587,10 @@ static int br2684_create(void __user * arg)
595 struct atm_newif_br2684 ni; 587 struct atm_newif_br2684 ni;
596 enum br2684_payload payload; 588 enum br2684_payload payload;
597 589
598 pr_debug("br2684_create\n"); 590 pr_debug("\n");
599 591
600 if (copy_from_user(&ni, arg, sizeof ni)) { 592 if (copy_from_user(&ni, arg, sizeof ni))
601 return -EFAULT; 593 return -EFAULT;
602 }
603 594
604 if (ni.media & BR2684_FLAG_ROUTED) 595 if (ni.media & BR2684_FLAG_ROUTED)
605 payload = p_routed; 596 payload = p_routed;
@@ -607,9 +598,8 @@ static int br2684_create(void __user * arg)
607 payload = p_bridged; 598 payload = p_bridged;
608 ni.media &= 0xffff; /* strip flags */ 599 ni.media &= 0xffff; /* strip flags */
609 600
610 if (ni.media != BR2684_MEDIA_ETHERNET || ni.mtu != 1500) { 601 if (ni.media != BR2684_MEDIA_ETHERNET || ni.mtu != 1500)
611 return -EINVAL; 602 return -EINVAL;
612 }
613 603
614 netdev = alloc_netdev(sizeof(struct br2684_dev), 604 netdev = alloc_netdev(sizeof(struct br2684_dev),
615 ni.ifname[0] ? ni.ifname : "nas%d", 605 ni.ifname[0] ? ni.ifname : "nas%d",
@@ -624,7 +614,7 @@ static int br2684_create(void __user * arg)
624 /* open, stop, do_ioctl ? */ 614 /* open, stop, do_ioctl ? */
625 err = register_netdev(netdev); 615 err = register_netdev(netdev);
626 if (err < 0) { 616 if (err < 0) {
627 printk(KERN_ERR "br2684_create: register_netdev failed\n"); 617 pr_err("register_netdev failed\n");
628 free_netdev(netdev); 618 free_netdev(netdev);
629 return err; 619 return err;
630 } 620 }
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 64629c354343..ebfa022008f7 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -2,6 +2,8 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
6
5#include <linux/string.h> 7#include <linux/string.h>
6#include <linux/errno.h> 8#include <linux/errno.h>
7#include <linux/kernel.h> /* for UINT_MAX */ 9#include <linux/kernel.h> /* for UINT_MAX */
@@ -30,10 +32,10 @@
30#include <linux/jhash.h> 32#include <linux/jhash.h>
31#include <net/route.h> /* for struct rtable and routing */ 33#include <net/route.h> /* for struct rtable and routing */
32#include <net/icmp.h> /* icmp_send */ 34#include <net/icmp.h> /* icmp_send */
33#include <asm/param.h> /* for HZ */ 35#include <linux/param.h> /* for HZ */
36#include <linux/uaccess.h>
34#include <asm/byteorder.h> /* for htons etc. */ 37#include <asm/byteorder.h> /* for htons etc. */
35#include <asm/system.h> /* save/restore_flags */ 38#include <asm/system.h> /* save/restore_flags */
36#include <asm/uaccess.h>
37#include <asm/atomic.h> 39#include <asm/atomic.h>
38 40
39#include "common.h" 41#include "common.h"
@@ -51,13 +53,13 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
51 struct atmarp_ctrl *ctrl; 53 struct atmarp_ctrl *ctrl;
52 struct sk_buff *skb; 54 struct sk_buff *skb;
53 55
54 pr_debug("to_atmarpd(%d)\n", type); 56 pr_debug("(%d)\n", type);
55 if (!atmarpd) 57 if (!atmarpd)
56 return -EUNATCH; 58 return -EUNATCH;
57 skb = alloc_skb(sizeof(struct atmarp_ctrl),GFP_ATOMIC); 59 skb = alloc_skb(sizeof(struct atmarp_ctrl), GFP_ATOMIC);
58 if (!skb) 60 if (!skb)
59 return -ENOMEM; 61 return -ENOMEM;
60 ctrl = (struct atmarp_ctrl *) skb_put(skb,sizeof(struct atmarp_ctrl)); 62 ctrl = (struct atmarp_ctrl *)skb_put(skb, sizeof(struct atmarp_ctrl));
61 ctrl->type = type; 63 ctrl->type = type;
62 ctrl->itf_num = itf; 64 ctrl->itf_num = itf;
63 ctrl->ip = ip; 65 ctrl->ip = ip;
@@ -71,8 +73,7 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
71 73
72static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry) 74static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry)
73{ 75{
74 pr_debug("link_vcc %p to entry %p (neigh %p)\n", clip_vcc, entry, 76 pr_debug("%p to entry %p (neigh %p)\n", clip_vcc, entry, entry->neigh);
75 entry->neigh);
76 clip_vcc->entry = entry; 77 clip_vcc->entry = entry;
77 clip_vcc->xoff = 0; /* @@@ may overrun buffer by one packet */ 78 clip_vcc->xoff = 0; /* @@@ may overrun buffer by one packet */
78 clip_vcc->next = entry->vccs; 79 clip_vcc->next = entry->vccs;
@@ -86,7 +87,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
86 struct clip_vcc **walk; 87 struct clip_vcc **walk;
87 88
88 if (!entry) { 89 if (!entry) {
89 printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n", clip_vcc); 90 pr_crit("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
90 return; 91 return;
91 } 92 }
92 netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */ 93 netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */
@@ -106,13 +107,11 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
106 error = neigh_update(entry->neigh, NULL, NUD_NONE, 107 error = neigh_update(entry->neigh, NULL, NUD_NONE,
107 NEIGH_UPDATE_F_ADMIN); 108 NEIGH_UPDATE_F_ADMIN);
108 if (error) 109 if (error)
109 printk(KERN_CRIT "unlink_clip_vcc: " 110 pr_crit("neigh_update failed with %d\n", error);
110 "neigh_update failed with %d\n", error);
111 goto out; 111 goto out;
112 } 112 }
113 printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc " 113 pr_crit("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc);
114 "0x%p)\n", entry, clip_vcc); 114out:
115 out:
116 netif_tx_unlock_bh(entry->neigh->dev); 115 netif_tx_unlock_bh(entry->neigh->dev);
117} 116}
118 117
@@ -127,7 +126,7 @@ static int neigh_check_cb(struct neighbour *n)
127 126
128 if (cv->idle_timeout && time_after(jiffies, exp)) { 127 if (cv->idle_timeout && time_after(jiffies, exp)) {
129 pr_debug("releasing vcc %p->%p of entry %p\n", 128 pr_debug("releasing vcc %p->%p of entry %p\n",
130 cv, cv->vcc, entry); 129 cv, cv->vcc, entry);
131 vcc_release_async(cv->vcc, -ETIMEDOUT); 130 vcc_release_async(cv->vcc, -ETIMEDOUT);
132 } 131 }
133 } 132 }
@@ -139,7 +138,7 @@ static int neigh_check_cb(struct neighbour *n)
139 struct sk_buff *skb; 138 struct sk_buff *skb;
140 139
141 pr_debug("destruction postponed with ref %d\n", 140 pr_debug("destruction postponed with ref %d\n",
142 atomic_read(&n->refcnt)); 141 atomic_read(&n->refcnt));
143 142
144 while ((skb = skb_dequeue(&n->arp_queue)) != NULL) 143 while ((skb = skb_dequeue(&n->arp_queue)) != NULL)
145 dev_kfree_skb(skb); 144 dev_kfree_skb(skb);
@@ -163,7 +162,7 @@ static int clip_arp_rcv(struct sk_buff *skb)
163{ 162{
164 struct atm_vcc *vcc; 163 struct atm_vcc *vcc;
165 164
166 pr_debug("clip_arp_rcv\n"); 165 pr_debug("\n");
167 vcc = ATM_SKB(skb)->vcc; 166 vcc = ATM_SKB(skb)->vcc;
168 if (!vcc || !atm_charge(vcc, skb->truesize)) { 167 if (!vcc || !atm_charge(vcc, skb->truesize)) {
169 dev_kfree_skb_any(skb); 168 dev_kfree_skb_any(skb);
@@ -188,7 +187,7 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
188{ 187{
189 struct clip_vcc *clip_vcc = CLIP_VCC(vcc); 188 struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
190 189
191 pr_debug("clip push\n"); 190 pr_debug("\n");
192 if (!skb) { 191 if (!skb) {
193 pr_debug("removing VCC %p\n", clip_vcc); 192 pr_debug("removing VCC %p\n", clip_vcc);
194 if (clip_vcc->entry) 193 if (clip_vcc->entry)
@@ -206,12 +205,12 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
206 } 205 }
207 ATM_SKB(skb)->vcc = vcc; 206 ATM_SKB(skb)->vcc = vcc;
208 skb_reset_mac_header(skb); 207 skb_reset_mac_header(skb);
209 if (!clip_vcc->encap 208 if (!clip_vcc->encap ||
210 || skb->len < RFC1483LLC_LEN 209 skb->len < RFC1483LLC_LEN ||
211 || memcmp(skb->data, llc_oui, sizeof (llc_oui))) 210 memcmp(skb->data, llc_oui, sizeof(llc_oui)))
212 skb->protocol = htons(ETH_P_IP); 211 skb->protocol = htons(ETH_P_IP);
213 else { 212 else {
214 skb->protocol = ((__be16 *) skb->data)[3]; 213 skb->protocol = ((__be16 *)skb->data)[3];
215 skb_pull(skb, RFC1483LLC_LEN); 214 skb_pull(skb, RFC1483LLC_LEN);
216 if (skb->protocol == htons(ETH_P_ARP)) { 215 if (skb->protocol == htons(ETH_P_ARP)) {
217 skb->dev->stats.rx_packets++; 216 skb->dev->stats.rx_packets++;
@@ -239,7 +238,7 @@ static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb)
239 int old; 238 int old;
240 unsigned long flags; 239 unsigned long flags;
241 240
242 pr_debug("clip_pop(vcc %p)\n", vcc); 241 pr_debug("(vcc %p)\n", vcc);
243 clip_vcc->old_pop(vcc, skb); 242 clip_vcc->old_pop(vcc, skb);
244 /* skb->dev == NULL in outbound ARP packets */ 243 /* skb->dev == NULL in outbound ARP packets */
245 if (!dev) 244 if (!dev)
@@ -255,7 +254,7 @@ static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb)
255 254
256static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb) 255static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb)
257{ 256{
258 pr_debug("clip_neigh_solicit (neigh %p, skb %p)\n", neigh, skb); 257 pr_debug("(neigh %p, skb %p)\n", neigh, skb);
259 to_atmarpd(act_need, PRIV(neigh->dev)->number, NEIGH2ENTRY(neigh)->ip); 258 to_atmarpd(act_need, PRIV(neigh->dev)->number, NEIGH2ENTRY(neigh)->ip);
260} 259}
261 260
@@ -284,7 +283,7 @@ static int clip_constructor(struct neighbour *neigh)
284 struct in_device *in_dev; 283 struct in_device *in_dev;
285 struct neigh_parms *parms; 284 struct neigh_parms *parms;
286 285
287 pr_debug("clip_constructor (neigh %p, entry %p)\n", neigh, entry); 286 pr_debug("(neigh %p, entry %p)\n", neigh, entry);
288 neigh->type = inet_addr_type(&init_net, entry->ip); 287 neigh->type = inet_addr_type(&init_net, entry->ip);
289 if (neigh->type != RTN_UNICAST) 288 if (neigh->type != RTN_UNICAST)
290 return -EINVAL; 289 return -EINVAL;
@@ -369,9 +368,9 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
369 int old; 368 int old;
370 unsigned long flags; 369 unsigned long flags;
371 370
372 pr_debug("clip_start_xmit (skb %p)\n", skb); 371 pr_debug("(skb %p)\n", skb);
373 if (!skb_dst(skb)) { 372 if (!skb_dst(skb)) {
374 printk(KERN_ERR "clip_start_xmit: skb_dst(skb) == NULL\n"); 373 pr_err("skb_dst(skb) == NULL\n");
375 dev_kfree_skb(skb); 374 dev_kfree_skb(skb);
376 dev->stats.tx_dropped++; 375 dev->stats.tx_dropped++;
377 return NETDEV_TX_OK; 376 return NETDEV_TX_OK;
@@ -385,7 +384,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
385 return 0; 384 return 0;
386 } 385 }
387#endif 386#endif
388 printk(KERN_ERR "clip_start_xmit: NO NEIGHBOUR !\n"); 387 pr_err("NO NEIGHBOUR !\n");
389 dev_kfree_skb(skb); 388 dev_kfree_skb(skb);
390 dev->stats.tx_dropped++; 389 dev->stats.tx_dropped++;
391 return NETDEV_TX_OK; 390 return NETDEV_TX_OK;
@@ -421,7 +420,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
421 pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev); 420 pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
422 old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */ 421 old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
423 if (old) { 422 if (old) {
424 printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n"); 423 pr_warning("XOFF->XOFF transition\n");
425 return NETDEV_TX_OK; 424 return NETDEV_TX_OK;
426 } 425 }
427 dev->stats.tx_packets++; 426 dev->stats.tx_packets++;
@@ -456,7 +455,7 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout)
456 clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL); 455 clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL);
457 if (!clip_vcc) 456 if (!clip_vcc)
458 return -ENOMEM; 457 return -ENOMEM;
459 pr_debug("mkip clip_vcc %p vcc %p\n", clip_vcc, vcc); 458 pr_debug("%p vcc %p\n", clip_vcc, vcc);
460 clip_vcc->vcc = vcc; 459 clip_vcc->vcc = vcc;
461 vcc->user_back = clip_vcc; 460 vcc->user_back = clip_vcc;
462 set_bit(ATM_VF_IS_CLIP, &vcc->flags); 461 set_bit(ATM_VF_IS_CLIP, &vcc->flags);
@@ -506,16 +505,16 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
506 struct rtable *rt; 505 struct rtable *rt;
507 506
508 if (vcc->push != clip_push) { 507 if (vcc->push != clip_push) {
509 printk(KERN_WARNING "clip_setentry: non-CLIP VCC\n"); 508 pr_warning("non-CLIP VCC\n");
510 return -EBADF; 509 return -EBADF;
511 } 510 }
512 clip_vcc = CLIP_VCC(vcc); 511 clip_vcc = CLIP_VCC(vcc);
513 if (!ip) { 512 if (!ip) {
514 if (!clip_vcc->entry) { 513 if (!clip_vcc->entry) {
515 printk(KERN_ERR "hiding hidden ATMARP entry\n"); 514 pr_err("hiding hidden ATMARP entry\n");
516 return 0; 515 return 0;
517 } 516 }
518 pr_debug("setentry: remove\n"); 517 pr_debug("remove\n");
519 unlink_clip_vcc(clip_vcc); 518 unlink_clip_vcc(clip_vcc);
520 return 0; 519 return 0;
521 } 520 }
@@ -529,9 +528,9 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
529 entry = NEIGH2ENTRY(neigh); 528 entry = NEIGH2ENTRY(neigh);
530 if (entry != clip_vcc->entry) { 529 if (entry != clip_vcc->entry) {
531 if (!clip_vcc->entry) 530 if (!clip_vcc->entry)
532 pr_debug("setentry: add\n"); 531 pr_debug("add\n");
533 else { 532 else {
534 pr_debug("setentry: update\n"); 533 pr_debug("update\n");
535 unlink_clip_vcc(clip_vcc); 534 unlink_clip_vcc(clip_vcc);
536 } 535 }
537 link_vcc(clip_vcc, entry); 536 link_vcc(clip_vcc, entry);
@@ -614,16 +613,16 @@ static int clip_device_event(struct notifier_block *this, unsigned long event,
614 613
615 switch (event) { 614 switch (event) {
616 case NETDEV_UP: 615 case NETDEV_UP:
617 pr_debug("clip_device_event NETDEV_UP\n"); 616 pr_debug("NETDEV_UP\n");
618 to_atmarpd(act_up, PRIV(dev)->number, 0); 617 to_atmarpd(act_up, PRIV(dev)->number, 0);
619 break; 618 break;
620 case NETDEV_GOING_DOWN: 619 case NETDEV_GOING_DOWN:
621 pr_debug("clip_device_event NETDEV_DOWN\n"); 620 pr_debug("NETDEV_DOWN\n");
622 to_atmarpd(act_down, PRIV(dev)->number, 0); 621 to_atmarpd(act_down, PRIV(dev)->number, 0);
623 break; 622 break;
624 case NETDEV_CHANGE: 623 case NETDEV_CHANGE:
625 case NETDEV_CHANGEMTU: 624 case NETDEV_CHANGEMTU:
626 pr_debug("clip_device_event NETDEV_CHANGE*\n"); 625 pr_debug("NETDEV_CHANGE*\n");
627 to_atmarpd(act_change, PRIV(dev)->number, 0); 626 to_atmarpd(act_change, PRIV(dev)->number, 0);
628 break; 627 break;
629 } 628 }
@@ -645,7 +644,6 @@ static int clip_inet_event(struct notifier_block *this, unsigned long event,
645 return clip_device_event(this, NETDEV_CHANGE, in_dev->dev); 644 return clip_device_event(this, NETDEV_CHANGE, in_dev->dev);
646} 645}
647 646
648
649static struct notifier_block clip_dev_notifier = { 647static struct notifier_block clip_dev_notifier = {
650 .notifier_call = clip_device_event, 648 .notifier_call = clip_device_event,
651}; 649};
@@ -660,7 +658,7 @@ static struct notifier_block clip_inet_notifier = {
660 658
661static void atmarpd_close(struct atm_vcc *vcc) 659static void atmarpd_close(struct atm_vcc *vcc)
662{ 660{
663 pr_debug("atmarpd_close\n"); 661 pr_debug("\n");
664 662
665 rtnl_lock(); 663 rtnl_lock();
666 atmarpd = NULL; 664 atmarpd = NULL;
@@ -671,7 +669,6 @@ static void atmarpd_close(struct atm_vcc *vcc)
671 module_put(THIS_MODULE); 669 module_put(THIS_MODULE);
672} 670}
673 671
674
675static struct atmdev_ops atmarpd_dev_ops = { 672static struct atmdev_ops atmarpd_dev_ops = {
676 .close = atmarpd_close 673 .close = atmarpd_close
677}; 674};
@@ -693,11 +690,11 @@ static int atm_init_atmarp(struct atm_vcc *vcc)
693 return -EADDRINUSE; 690 return -EADDRINUSE;
694 } 691 }
695 692
696 mod_timer(&idle_timer, jiffies+CLIP_CHECK_INTERVAL*HZ); 693 mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ);
697 694
698 atmarpd = vcc; 695 atmarpd = vcc;
699 set_bit(ATM_VF_META,&vcc->flags); 696 set_bit(ATM_VF_META, &vcc->flags);
700 set_bit(ATM_VF_READY,&vcc->flags); 697 set_bit(ATM_VF_READY, &vcc->flags);
701 /* allow replies and avoid getting closed if signaling dies */ 698 /* allow replies and avoid getting closed if signaling dies */
702 vcc->dev = &atmarpd_dev; 699 vcc->dev = &atmarpd_dev;
703 vcc_insert_socket(sk_atm(vcc)); 700 vcc_insert_socket(sk_atm(vcc));
@@ -950,8 +947,7 @@ static int __init atm_clip_init(void)
950 947
951 p = proc_create("arp", S_IRUGO, atm_proc_root, &arp_seq_fops); 948 p = proc_create("arp", S_IRUGO, atm_proc_root, &arp_seq_fops);
952 if (!p) { 949 if (!p) {
953 printk(KERN_ERR "Unable to initialize " 950 pr_err("Unable to initialize /proc/net/atm/arp\n");
954 "/proc/net/atm/arp\n");
955 atm_clip_exit_noproc(); 951 atm_clip_exit_noproc();
956 return -ENOMEM; 952 return -ENOMEM;
957 } 953 }
diff --git a/net/atm/common.c b/net/atm/common.c
index d61e051e0a3f..74d095a081e3 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -2,6 +2,7 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
5 6
6#include <linux/module.h> 7#include <linux/module.h>
7#include <linux/kmod.h> 8#include <linux/kmod.h>
@@ -18,11 +19,10 @@
18#include <linux/bitops.h> 19#include <linux/bitops.h>
19#include <linux/init.h> 20#include <linux/init.h>
20#include <net/sock.h> /* struct sock */ 21#include <net/sock.h> /* struct sock */
22#include <linux/uaccess.h>
23#include <linux/poll.h>
21 24
22#include <asm/uaccess.h>
23#include <asm/atomic.h> 25#include <asm/atomic.h>
24#include <asm/poll.h>
25
26 26
27#include "resources.h" /* atm_find_dev */ 27#include "resources.h" /* atm_find_dev */
28#include "common.h" /* prototypes */ 28#include "common.h" /* prototypes */
@@ -31,13 +31,15 @@
31#include "signaling.h" /* for WAITING and sigd_attach */ 31#include "signaling.h" /* for WAITING and sigd_attach */
32 32
33struct hlist_head vcc_hash[VCC_HTABLE_SIZE]; 33struct hlist_head vcc_hash[VCC_HTABLE_SIZE];
34EXPORT_SYMBOL(vcc_hash);
35
34DEFINE_RWLOCK(vcc_sklist_lock); 36DEFINE_RWLOCK(vcc_sklist_lock);
37EXPORT_SYMBOL(vcc_sklist_lock);
35 38
36static void __vcc_insert_socket(struct sock *sk) 39static void __vcc_insert_socket(struct sock *sk)
37{ 40{
38 struct atm_vcc *vcc = atm_sk(sk); 41 struct atm_vcc *vcc = atm_sk(sk);
39 struct hlist_head *head = &vcc_hash[vcc->vci & 42 struct hlist_head *head = &vcc_hash[vcc->vci & (VCC_HTABLE_SIZE - 1)];
40 (VCC_HTABLE_SIZE - 1)];
41 sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1); 43 sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1);
42 sk_add_node(sk, head); 44 sk_add_node(sk, head);
43} 45}
@@ -48,6 +50,7 @@ void vcc_insert_socket(struct sock *sk)
48 __vcc_insert_socket(sk); 50 __vcc_insert_socket(sk);
49 write_unlock_irq(&vcc_sklist_lock); 51 write_unlock_irq(&vcc_sklist_lock);
50} 52}
53EXPORT_SYMBOL(vcc_insert_socket);
51 54
52static void vcc_remove_socket(struct sock *sk) 55static void vcc_remove_socket(struct sock *sk)
53{ 56{
@@ -56,37 +59,32 @@ static void vcc_remove_socket(struct sock *sk)
56 write_unlock_irq(&vcc_sklist_lock); 59 write_unlock_irq(&vcc_sklist_lock);
57} 60}
58 61
59 62static struct sk_buff *alloc_tx(struct atm_vcc *vcc, unsigned int size)
60static struct sk_buff *alloc_tx(struct atm_vcc *vcc,unsigned int size)
61{ 63{
62 struct sk_buff *skb; 64 struct sk_buff *skb;
63 struct sock *sk = sk_atm(vcc); 65 struct sock *sk = sk_atm(vcc);
64 66
65 if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) { 67 if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) {
66 pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n", 68 pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n",
67 sk_wmem_alloc_get(sk), size, 69 sk_wmem_alloc_get(sk), size, sk->sk_sndbuf);
68 sk->sk_sndbuf);
69 return NULL; 70 return NULL;
70 } 71 }
71 while (!(skb = alloc_skb(size, GFP_KERNEL))) 72 while (!(skb = alloc_skb(size, GFP_KERNEL)))
72 schedule(); 73 schedule();
73 pr_debug("AlTx %d += %d\n", sk_wmem_alloc_get(sk), skb->truesize); 74 pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
74 atomic_add(skb->truesize, &sk->sk_wmem_alloc); 75 atomic_add(skb->truesize, &sk->sk_wmem_alloc);
75 return skb; 76 return skb;
76} 77}
77 78
78
79EXPORT_SYMBOL(vcc_hash);
80EXPORT_SYMBOL(vcc_sklist_lock);
81EXPORT_SYMBOL(vcc_insert_socket);
82
83static void vcc_sock_destruct(struct sock *sk) 79static void vcc_sock_destruct(struct sock *sk)
84{ 80{
85 if (atomic_read(&sk->sk_rmem_alloc)) 81 if (atomic_read(&sk->sk_rmem_alloc))
86 printk(KERN_DEBUG "vcc_sock_destruct: rmem leakage (%d bytes) detected.\n", atomic_read(&sk->sk_rmem_alloc)); 82 printk(KERN_DEBUG "%s: rmem leakage (%d bytes) detected.\n",
83 __func__, atomic_read(&sk->sk_rmem_alloc));
87 84
88 if (atomic_read(&sk->sk_wmem_alloc)) 85 if (atomic_read(&sk->sk_wmem_alloc))
89 printk(KERN_DEBUG "vcc_sock_destruct: wmem leakage (%d bytes) detected.\n", atomic_read(&sk->sk_wmem_alloc)); 86 printk(KERN_DEBUG "%s: wmem leakage (%d bytes) detected.\n",
87 __func__, atomic_read(&sk->sk_wmem_alloc));
90} 88}
91 89
92static void vcc_def_wakeup(struct sock *sk) 90static void vcc_def_wakeup(struct sock *sk)
@@ -142,8 +140,8 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
142 140
143 vcc = atm_sk(sk); 141 vcc = atm_sk(sk);
144 vcc->dev = NULL; 142 vcc->dev = NULL;
145 memset(&vcc->local,0,sizeof(struct sockaddr_atmsvc)); 143 memset(&vcc->local, 0, sizeof(struct sockaddr_atmsvc));
146 memset(&vcc->remote,0,sizeof(struct sockaddr_atmsvc)); 144 memset(&vcc->remote, 0, sizeof(struct sockaddr_atmsvc));
147 vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */ 145 vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */
148 atomic_set(&sk->sk_wmem_alloc, 1); 146 atomic_set(&sk->sk_wmem_alloc, 1);
149 atomic_set(&sk->sk_rmem_alloc, 0); 147 atomic_set(&sk->sk_rmem_alloc, 0);
@@ -156,7 +154,6 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
156 return 0; 154 return 0;
157} 155}
158 156
159
160static void vcc_destroy_socket(struct sock *sk) 157static void vcc_destroy_socket(struct sock *sk)
161{ 158{
162 struct atm_vcc *vcc = atm_sk(sk); 159 struct atm_vcc *vcc = atm_sk(sk);
@@ -171,7 +168,7 @@ static void vcc_destroy_socket(struct sock *sk)
171 vcc->push(vcc, NULL); /* atmarpd has no push */ 168 vcc->push(vcc, NULL); /* atmarpd has no push */
172 169
173 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { 170 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
174 atm_return(vcc,skb->truesize); 171 atm_return(vcc, skb->truesize);
175 kfree_skb(skb); 172 kfree_skb(skb);
176 } 173 }
177 174
@@ -182,7 +179,6 @@ static void vcc_destroy_socket(struct sock *sk)
182 vcc_remove_socket(sk); 179 vcc_remove_socket(sk);
183} 180}
184 181
185
186int vcc_release(struct socket *sock) 182int vcc_release(struct socket *sock)
187{ 183{
188 struct sock *sk = sock->sk; 184 struct sock *sk = sock->sk;
@@ -197,7 +193,6 @@ int vcc_release(struct socket *sock)
197 return 0; 193 return 0;
198} 194}
199 195
200
201void vcc_release_async(struct atm_vcc *vcc, int reply) 196void vcc_release_async(struct atm_vcc *vcc, int reply)
202{ 197{
203 struct sock *sk = sk_atm(vcc); 198 struct sock *sk = sk_atm(vcc);
@@ -208,8 +203,6 @@ void vcc_release_async(struct atm_vcc *vcc, int reply)
208 clear_bit(ATM_VF_WAITING, &vcc->flags); 203 clear_bit(ATM_VF_WAITING, &vcc->flags);
209 sk->sk_state_change(sk); 204 sk->sk_state_change(sk);
210} 205}
211
212
213EXPORT_SYMBOL(vcc_release_async); 206EXPORT_SYMBOL(vcc_release_async);
214 207
215 208
@@ -235,37 +228,37 @@ void atm_dev_release_vccs(struct atm_dev *dev)
235 write_unlock_irq(&vcc_sklist_lock); 228 write_unlock_irq(&vcc_sklist_lock);
236} 229}
237 230
238 231static int adjust_tp(struct atm_trafprm *tp, unsigned char aal)
239static int adjust_tp(struct atm_trafprm *tp,unsigned char aal)
240{ 232{
241 int max_sdu; 233 int max_sdu;
242 234
243 if (!tp->traffic_class) return 0; 235 if (!tp->traffic_class)
236 return 0;
244 switch (aal) { 237 switch (aal) {
245 case ATM_AAL0: 238 case ATM_AAL0:
246 max_sdu = ATM_CELL_SIZE-1; 239 max_sdu = ATM_CELL_SIZE-1;
247 break; 240 break;
248 case ATM_AAL34: 241 case ATM_AAL34:
249 max_sdu = ATM_MAX_AAL34_PDU; 242 max_sdu = ATM_MAX_AAL34_PDU;
250 break; 243 break;
251 default: 244 default:
252 printk(KERN_WARNING "ATM: AAL problems ... " 245 pr_warning("AAL problems ... (%d)\n", aal);
253 "(%d)\n",aal); 246 /* fall through */
254 /* fall through */ 247 case ATM_AAL5:
255 case ATM_AAL5: 248 max_sdu = ATM_MAX_AAL5_PDU;
256 max_sdu = ATM_MAX_AAL5_PDU;
257 } 249 }
258 if (!tp->max_sdu) tp->max_sdu = max_sdu; 250 if (!tp->max_sdu)
259 else if (tp->max_sdu > max_sdu) return -EINVAL; 251 tp->max_sdu = max_sdu;
260 if (!tp->max_cdv) tp->max_cdv = ATM_MAX_CDV; 252 else if (tp->max_sdu > max_sdu)
253 return -EINVAL;
254 if (!tp->max_cdv)
255 tp->max_cdv = ATM_MAX_CDV;
261 return 0; 256 return 0;
262} 257}
263 258
264
265static int check_ci(const struct atm_vcc *vcc, short vpi, int vci) 259static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
266{ 260{
267 struct hlist_head *head = &vcc_hash[vci & 261 struct hlist_head *head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];
268 (VCC_HTABLE_SIZE - 1)];
269 struct hlist_node *node; 262 struct hlist_node *node;
270 struct sock *s; 263 struct sock *s;
271 struct atm_vcc *walk; 264 struct atm_vcc *walk;
@@ -289,7 +282,6 @@ static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
289 return 0; 282 return 0;
290} 283}
291 284
292
293static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci) 285static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci)
294{ 286{
295 static short p; /* poor man's per-device cache */ 287 static short p; /* poor man's per-device cache */
@@ -327,14 +319,13 @@ static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci)
327 if ((c == ATM_NOT_RSV_VCI || *vci != ATM_VCI_ANY) && 319 if ((c == ATM_NOT_RSV_VCI || *vci != ATM_VCI_ANY) &&
328 *vpi == ATM_VPI_ANY) { 320 *vpi == ATM_VPI_ANY) {
329 p++; 321 p++;
330 if (p >= 1 << vcc->dev->ci_range.vpi_bits) p = 0; 322 if (p >= 1 << vcc->dev->ci_range.vpi_bits)
323 p = 0;
331 } 324 }
332 } 325 } while (old_p != p || old_c != c);
333 while (old_p != p || old_c != c);
334 return -EADDRINUSE; 326 return -EADDRINUSE;
335} 327}
336 328
337
338static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi, 329static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi,
339 int vci) 330 int vci)
340{ 331{
@@ -362,37 +353,46 @@ static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi,
362 __vcc_insert_socket(sk); 353 __vcc_insert_socket(sk);
363 write_unlock_irq(&vcc_sklist_lock); 354 write_unlock_irq(&vcc_sklist_lock);
364 switch (vcc->qos.aal) { 355 switch (vcc->qos.aal) {
365 case ATM_AAL0: 356 case ATM_AAL0:
366 error = atm_init_aal0(vcc); 357 error = atm_init_aal0(vcc);
367 vcc->stats = &dev->stats.aal0; 358 vcc->stats = &dev->stats.aal0;
368 break; 359 break;
369 case ATM_AAL34: 360 case ATM_AAL34:
370 error = atm_init_aal34(vcc); 361 error = atm_init_aal34(vcc);
371 vcc->stats = &dev->stats.aal34; 362 vcc->stats = &dev->stats.aal34;
372 break; 363 break;
373 case ATM_NO_AAL: 364 case ATM_NO_AAL:
374 /* ATM_AAL5 is also used in the "0 for default" case */ 365 /* ATM_AAL5 is also used in the "0 for default" case */
375 vcc->qos.aal = ATM_AAL5; 366 vcc->qos.aal = ATM_AAL5;
376 /* fall through */ 367 /* fall through */
377 case ATM_AAL5: 368 case ATM_AAL5:
378 error = atm_init_aal5(vcc); 369 error = atm_init_aal5(vcc);
379 vcc->stats = &dev->stats.aal5; 370 vcc->stats = &dev->stats.aal5;
380 break; 371 break;
381 default: 372 default:
382 error = -EPROTOTYPE; 373 error = -EPROTOTYPE;
383 } 374 }
384 if (!error) error = adjust_tp(&vcc->qos.txtp,vcc->qos.aal); 375 if (!error)
385 if (!error) error = adjust_tp(&vcc->qos.rxtp,vcc->qos.aal); 376 error = adjust_tp(&vcc->qos.txtp, vcc->qos.aal);
377 if (!error)
378 error = adjust_tp(&vcc->qos.rxtp, vcc->qos.aal);
386 if (error) 379 if (error)
387 goto fail; 380 goto fail;
388 pr_debug("VCC %d.%d, AAL %d\n",vpi,vci,vcc->qos.aal); 381 pr_debug("VCC %d.%d, AAL %d\n", vpi, vci, vcc->qos.aal);
389 pr_debug(" TX: %d, PCR %d..%d, SDU %d\n",vcc->qos.txtp.traffic_class, 382 pr_debug(" TX: %d, PCR %d..%d, SDU %d\n",
390 vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,vcc->qos.txtp.max_sdu); 383 vcc->qos.txtp.traffic_class,
391 pr_debug(" RX: %d, PCR %d..%d, SDU %d\n",vcc->qos.rxtp.traffic_class, 384 vcc->qos.txtp.min_pcr,
392 vcc->qos.rxtp.min_pcr,vcc->qos.rxtp.max_pcr,vcc->qos.rxtp.max_sdu); 385 vcc->qos.txtp.max_pcr,
386 vcc->qos.txtp.max_sdu);
387 pr_debug(" RX: %d, PCR %d..%d, SDU %d\n",
388 vcc->qos.rxtp.traffic_class,
389 vcc->qos.rxtp.min_pcr,
390 vcc->qos.rxtp.max_pcr,
391 vcc->qos.rxtp.max_sdu);
393 392
394 if (dev->ops->open) { 393 if (dev->ops->open) {
395 if ((error = dev->ops->open(vcc))) 394 error = dev->ops->open(vcc);
395 if (error)
396 goto fail; 396 goto fail;
397 } 397 }
398 return 0; 398 return 0;
@@ -406,14 +406,13 @@ fail_module_put:
406 return error; 406 return error;
407} 407}
408 408
409
410int vcc_connect(struct socket *sock, int itf, short vpi, int vci) 409int vcc_connect(struct socket *sock, int itf, short vpi, int vci)
411{ 410{
412 struct atm_dev *dev; 411 struct atm_dev *dev;
413 struct atm_vcc *vcc = ATM_SD(sock); 412 struct atm_vcc *vcc = ATM_SD(sock);
414 int error; 413 int error;
415 414
416 pr_debug("vcc_connect (vpi %d, vci %d)\n",vpi,vci); 415 pr_debug("(vpi %d, vci %d)\n", vpi, vci);
417 if (sock->state == SS_CONNECTED) 416 if (sock->state == SS_CONNECTED)
418 return -EISCONN; 417 return -EISCONN;
419 if (sock->state != SS_UNCONNECTED) 418 if (sock->state != SS_UNCONNECTED)
@@ -422,30 +421,33 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci)
422 return -EINVAL; 421 return -EINVAL;
423 422
424 if (vpi != ATM_VPI_UNSPEC && vci != ATM_VCI_UNSPEC) 423 if (vpi != ATM_VPI_UNSPEC && vci != ATM_VCI_UNSPEC)
425 clear_bit(ATM_VF_PARTIAL,&vcc->flags); 424 clear_bit(ATM_VF_PARTIAL, &vcc->flags);
426 else 425 else
427 if (test_bit(ATM_VF_PARTIAL,&vcc->flags)) 426 if (test_bit(ATM_VF_PARTIAL, &vcc->flags))
428 return -EINVAL; 427 return -EINVAL;
429 pr_debug("vcc_connect (TX: cl %d,bw %d-%d,sdu %d; " 428 pr_debug("(TX: cl %d,bw %d-%d,sdu %d; "
430 "RX: cl %d,bw %d-%d,sdu %d,AAL %s%d)\n", 429 "RX: cl %d,bw %d-%d,sdu %d,AAL %s%d)\n",
431 vcc->qos.txtp.traffic_class,vcc->qos.txtp.min_pcr, 430 vcc->qos.txtp.traffic_class, vcc->qos.txtp.min_pcr,
432 vcc->qos.txtp.max_pcr,vcc->qos.txtp.max_sdu, 431 vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_sdu,
433 vcc->qos.rxtp.traffic_class,vcc->qos.rxtp.min_pcr, 432 vcc->qos.rxtp.traffic_class, vcc->qos.rxtp.min_pcr,
434 vcc->qos.rxtp.max_pcr,vcc->qos.rxtp.max_sdu, 433 vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_sdu,
435 vcc->qos.aal == ATM_AAL5 ? "" : vcc->qos.aal == ATM_AAL0 ? "" : 434 vcc->qos.aal == ATM_AAL5 ? "" :
436 " ??? code ",vcc->qos.aal == ATM_AAL0 ? 0 : vcc->qos.aal); 435 vcc->qos.aal == ATM_AAL0 ? "" : " ??? code ",
436 vcc->qos.aal == ATM_AAL0 ? 0 : vcc->qos.aal);
437 if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) 437 if (!test_bit(ATM_VF_HASQOS, &vcc->flags))
438 return -EBADFD; 438 return -EBADFD;
439 if (vcc->qos.txtp.traffic_class == ATM_ANYCLASS || 439 if (vcc->qos.txtp.traffic_class == ATM_ANYCLASS ||
440 vcc->qos.rxtp.traffic_class == ATM_ANYCLASS) 440 vcc->qos.rxtp.traffic_class == ATM_ANYCLASS)
441 return -EINVAL; 441 return -EINVAL;
442 if (likely(itf != ATM_ITF_ANY)) { 442 if (likely(itf != ATM_ITF_ANY)) {
443 dev = try_then_request_module(atm_dev_lookup(itf), "atm-device-%d", itf); 443 dev = try_then_request_module(atm_dev_lookup(itf),
444 "atm-device-%d", itf);
444 } else { 445 } else {
445 dev = NULL; 446 dev = NULL;
446 mutex_lock(&atm_dev_mutex); 447 mutex_lock(&atm_dev_mutex);
447 if (!list_empty(&atm_devs)) { 448 if (!list_empty(&atm_devs)) {
448 dev = list_entry(atm_devs.next, struct atm_dev, dev_list); 449 dev = list_entry(atm_devs.next,
450 struct atm_dev, dev_list);
449 atm_dev_hold(dev); 451 atm_dev_hold(dev);
450 } 452 }
451 mutex_unlock(&atm_dev_mutex); 453 mutex_unlock(&atm_dev_mutex);
@@ -458,13 +460,12 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci)
458 return error; 460 return error;
459 } 461 }
460 if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) 462 if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC)
461 set_bit(ATM_VF_PARTIAL,&vcc->flags); 463 set_bit(ATM_VF_PARTIAL, &vcc->flags);
462 if (test_bit(ATM_VF_READY,&ATM_SD(sock)->flags)) 464 if (test_bit(ATM_VF_READY, &ATM_SD(sock)->flags))
463 sock->state = SS_CONNECTED; 465 sock->state = SS_CONNECTED;
464 return 0; 466 return 0;
465} 467}
466 468
467
468int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, 469int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
469 size_t size, int flags) 470 size_t size, int flags)
470{ 471{
@@ -478,8 +479,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
478 if (flags & ~MSG_DONTWAIT) /* only handle MSG_DONTWAIT */ 479 if (flags & ~MSG_DONTWAIT) /* only handle MSG_DONTWAIT */
479 return -EOPNOTSUPP; 480 return -EOPNOTSUPP;
480 vcc = ATM_SD(sock); 481 vcc = ATM_SD(sock);
481 if (test_bit(ATM_VF_RELEASED,&vcc->flags) || 482 if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
482 test_bit(ATM_VF_CLOSE,&vcc->flags) || 483 test_bit(ATM_VF_CLOSE, &vcc->flags) ||
483 !test_bit(ATM_VF_READY, &vcc->flags)) 484 !test_bit(ATM_VF_READY, &vcc->flags))
484 return 0; 485 return 0;
485 486
@@ -497,13 +498,12 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
497 if (error) 498 if (error)
498 return error; 499 return error;
499 sock_recv_ts_and_drops(msg, sk, skb); 500 sock_recv_ts_and_drops(msg, sk, skb);
500 pr_debug("RcvM %d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize); 501 pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize);
501 atm_return(vcc, skb->truesize); 502 atm_return(vcc, skb->truesize);
502 skb_free_datagram(sk, skb); 503 skb_free_datagram(sk, skb);
503 return copied; 504 return copied;
504} 505}
505 506
506
507int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, 507int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
508 size_t total_len) 508 size_t total_len)
509{ 509{
@@ -511,7 +511,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
511 DEFINE_WAIT(wait); 511 DEFINE_WAIT(wait);
512 struct atm_vcc *vcc; 512 struct atm_vcc *vcc;
513 struct sk_buff *skb; 513 struct sk_buff *skb;
514 int eff,error; 514 int eff, error;
515 const void __user *buff; 515 const void __user *buff;
516 int size; 516 int size;
517 517
@@ -550,7 +550,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
550 eff = (size+3) & ~3; /* align to word boundary */ 550 eff = (size+3) & ~3; /* align to word boundary */
551 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 551 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
552 error = 0; 552 error = 0;
553 while (!(skb = alloc_tx(vcc,eff))) { 553 while (!(skb = alloc_tx(vcc, eff))) {
554 if (m->msg_flags & MSG_DONTWAIT) { 554 if (m->msg_flags & MSG_DONTWAIT) {
555 error = -EAGAIN; 555 error = -EAGAIN;
556 break; 556 break;
@@ -560,9 +560,9 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
560 error = -ERESTARTSYS; 560 error = -ERESTARTSYS;
561 break; 561 break;
562 } 562 }
563 if (test_bit(ATM_VF_RELEASED,&vcc->flags) || 563 if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
564 test_bit(ATM_VF_CLOSE,&vcc->flags) || 564 test_bit(ATM_VF_CLOSE, &vcc->flags) ||
565 !test_bit(ATM_VF_READY,&vcc->flags)) { 565 !test_bit(ATM_VF_READY, &vcc->flags)) {
566 error = -EPIPE; 566 error = -EPIPE;
567 send_sig(SIGPIPE, current, 0); 567 send_sig(SIGPIPE, current, 0);
568 break; 568 break;
@@ -574,20 +574,20 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
574 goto out; 574 goto out;
575 skb->dev = NULL; /* for paths shared with net_device interfaces */ 575 skb->dev = NULL; /* for paths shared with net_device interfaces */
576 ATM_SKB(skb)->atm_options = vcc->atm_options; 576 ATM_SKB(skb)->atm_options = vcc->atm_options;
577 if (copy_from_user(skb_put(skb,size),buff,size)) { 577 if (copy_from_user(skb_put(skb, size), buff, size)) {
578 kfree_skb(skb); 578 kfree_skb(skb);
579 error = -EFAULT; 579 error = -EFAULT;
580 goto out; 580 goto out;
581 } 581 }
582 if (eff != size) memset(skb->data+size,0,eff-size); 582 if (eff != size)
583 error = vcc->dev->ops->send(vcc,skb); 583 memset(skb->data + size, 0, eff-size);
584 error = vcc->dev->ops->send(vcc, skb);
584 error = error ? error : size; 585 error = error ? error : size;
585out: 586out:
586 release_sock(sk); 587 release_sock(sk);
587 return error; 588 return error;
588} 589}
589 590
590
591unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait) 591unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
592{ 592{
593 struct sock *sk = sock->sk; 593 struct sock *sk = sock->sk;
@@ -623,8 +623,7 @@ unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
623 return mask; 623 return mask;
624} 624}
625 625
626 626static int atm_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
627static int atm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos)
628{ 627{
629 int error; 628 int error;
630 629
@@ -636,25 +635,31 @@ static int atm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos)
636 qos->rxtp.traffic_class != vcc->qos.rxtp.traffic_class || 635 qos->rxtp.traffic_class != vcc->qos.rxtp.traffic_class ||
637 qos->txtp.traffic_class != vcc->qos.txtp.traffic_class) 636 qos->txtp.traffic_class != vcc->qos.txtp.traffic_class)
638 return -EINVAL; 637 return -EINVAL;
639 error = adjust_tp(&qos->txtp,qos->aal); 638 error = adjust_tp(&qos->txtp, qos->aal);
640 if (!error) error = adjust_tp(&qos->rxtp,qos->aal); 639 if (!error)
641 if (error) return error; 640 error = adjust_tp(&qos->rxtp, qos->aal);
642 if (!vcc->dev->ops->change_qos) return -EOPNOTSUPP; 641 if (error)
642 return error;
643 if (!vcc->dev->ops->change_qos)
644 return -EOPNOTSUPP;
643 if (sk_atm(vcc)->sk_family == AF_ATMPVC) 645 if (sk_atm(vcc)->sk_family == AF_ATMPVC)
644 return vcc->dev->ops->change_qos(vcc,qos,ATM_MF_SET); 646 return vcc->dev->ops->change_qos(vcc, qos, ATM_MF_SET);
645 return svc_change_qos(vcc,qos); 647 return svc_change_qos(vcc, qos);
646} 648}
647 649
648
649static int check_tp(const struct atm_trafprm *tp) 650static int check_tp(const struct atm_trafprm *tp)
650{ 651{
651 /* @@@ Should be merged with adjust_tp */ 652 /* @@@ Should be merged with adjust_tp */
652 if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS) return 0; 653 if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS)
654 return 0;
653 if (tp->traffic_class != ATM_UBR && !tp->min_pcr && !tp->pcr && 655 if (tp->traffic_class != ATM_UBR && !tp->min_pcr && !tp->pcr &&
654 !tp->max_pcr) return -EINVAL; 656 !tp->max_pcr)
655 if (tp->min_pcr == ATM_MAX_PCR) return -EINVAL; 657 return -EINVAL;
658 if (tp->min_pcr == ATM_MAX_PCR)
659 return -EINVAL;
656 if (tp->min_pcr && tp->max_pcr && tp->max_pcr != ATM_MAX_PCR && 660 if (tp->min_pcr && tp->max_pcr && tp->max_pcr != ATM_MAX_PCR &&
657 tp->min_pcr > tp->max_pcr) return -EINVAL; 661 tp->min_pcr > tp->max_pcr)
662 return -EINVAL;
658 /* 663 /*
659 * We allow pcr to be outside [min_pcr,max_pcr], because later 664 * We allow pcr to be outside [min_pcr,max_pcr], because later
660 * adjustment may still push it in the valid range. 665 * adjustment may still push it in the valid range.
@@ -662,7 +667,6 @@ static int check_tp(const struct atm_trafprm *tp)
662 return 0; 667 return 0;
663} 668}
664 669
665
666static int check_qos(const struct atm_qos *qos) 670static int check_qos(const struct atm_qos *qos)
667{ 671{
668 int error; 672 int error;
@@ -672,9 +676,11 @@ static int check_qos(const struct atm_qos *qos)
672 if (qos->txtp.traffic_class != qos->rxtp.traffic_class && 676 if (qos->txtp.traffic_class != qos->rxtp.traffic_class &&
673 qos->txtp.traffic_class && qos->rxtp.traffic_class && 677 qos->txtp.traffic_class && qos->rxtp.traffic_class &&
674 qos->txtp.traffic_class != ATM_ANYCLASS && 678 qos->txtp.traffic_class != ATM_ANYCLASS &&
675 qos->rxtp.traffic_class != ATM_ANYCLASS) return -EINVAL; 679 qos->rxtp.traffic_class != ATM_ANYCLASS)
680 return -EINVAL;
676 error = check_tp(&qos->txtp); 681 error = check_tp(&qos->txtp);
677 if (error) return error; 682 if (error)
683 return error;
678 return check_tp(&qos->rxtp); 684 return check_tp(&qos->rxtp);
679} 685}
680 686
@@ -690,37 +696,41 @@ int vcc_setsockopt(struct socket *sock, int level, int optname,
690 696
691 vcc = ATM_SD(sock); 697 vcc = ATM_SD(sock);
692 switch (optname) { 698 switch (optname) {
693 case SO_ATMQOS: 699 case SO_ATMQOS:
694 { 700 {
695 struct atm_qos qos; 701 struct atm_qos qos;
696 702
697 if (copy_from_user(&qos,optval,sizeof(qos))) 703 if (copy_from_user(&qos, optval, sizeof(qos)))
698 return -EFAULT; 704 return -EFAULT;
699 error = check_qos(&qos); 705 error = check_qos(&qos);
700 if (error) return error; 706 if (error)
701 if (sock->state == SS_CONNECTED) 707 return error;
702 return atm_change_qos(vcc,&qos); 708 if (sock->state == SS_CONNECTED)
703 if (sock->state != SS_UNCONNECTED) 709 return atm_change_qos(vcc, &qos);
704 return -EBADFD; 710 if (sock->state != SS_UNCONNECTED)
705 vcc->qos = qos; 711 return -EBADFD;
706 set_bit(ATM_VF_HASQOS,&vcc->flags); 712 vcc->qos = qos;
707 return 0; 713 set_bit(ATM_VF_HASQOS, &vcc->flags);
708 } 714 return 0;
709 case SO_SETCLP:
710 if (get_user(value,(unsigned long __user *)optval))
711 return -EFAULT;
712 if (value) vcc->atm_options |= ATM_ATMOPT_CLP;
713 else vcc->atm_options &= ~ATM_ATMOPT_CLP;
714 return 0;
715 default:
716 if (level == SOL_SOCKET) return -EINVAL;
717 break;
718 } 715 }
719 if (!vcc->dev || !vcc->dev->ops->setsockopt) return -EINVAL; 716 case SO_SETCLP:
720 return vcc->dev->ops->setsockopt(vcc,level,optname,optval,optlen); 717 if (get_user(value, (unsigned long __user *)optval))
718 return -EFAULT;
719 if (value)
720 vcc->atm_options |= ATM_ATMOPT_CLP;
721 else
722 vcc->atm_options &= ~ATM_ATMOPT_CLP;
723 return 0;
724 default:
725 if (level == SOL_SOCKET)
726 return -EINVAL;
727 break;
728 }
729 if (!vcc->dev || !vcc->dev->ops->setsockopt)
730 return -EINVAL;
731 return vcc->dev->ops->setsockopt(vcc, level, optname, optval, optlen);
721} 732}
722 733
723
724int vcc_getsockopt(struct socket *sock, int level, int optname, 734int vcc_getsockopt(struct socket *sock, int level, int optname,
725 char __user *optval, int __user *optlen) 735 char __user *optval, int __user *optlen)
726{ 736{
@@ -734,33 +744,33 @@ int vcc_getsockopt(struct socket *sock, int level, int optname,
734 744
735 vcc = ATM_SD(sock); 745 vcc = ATM_SD(sock);
736 switch (optname) { 746 switch (optname) {
737 case SO_ATMQOS: 747 case SO_ATMQOS:
738 if (!test_bit(ATM_VF_HASQOS,&vcc->flags)) 748 if (!test_bit(ATM_VF_HASQOS, &vcc->flags))
739 return -EINVAL; 749 return -EINVAL;
740 return copy_to_user(optval,&vcc->qos,sizeof(vcc->qos)) ? 750 return copy_to_user(optval, &vcc->qos, sizeof(vcc->qos))
741 -EFAULT : 0; 751 ? -EFAULT : 0;
742 case SO_SETCLP: 752 case SO_SETCLP:
743 return put_user(vcc->atm_options & ATM_ATMOPT_CLP ? 1 : 753 return put_user(vcc->atm_options & ATM_ATMOPT_CLP ? 1 : 0,
744 0,(unsigned long __user *)optval) ? -EFAULT : 0; 754 (unsigned long __user *)optval) ? -EFAULT : 0;
745 case SO_ATMPVC: 755 case SO_ATMPVC:
746 { 756 {
747 struct sockaddr_atmpvc pvc; 757 struct sockaddr_atmpvc pvc;
748 758
749 if (!vcc->dev || 759 if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
750 !test_bit(ATM_VF_ADDR,&vcc->flags)) 760 return -ENOTCONN;
751 return -ENOTCONN; 761 pvc.sap_family = AF_ATMPVC;
752 pvc.sap_family = AF_ATMPVC; 762 pvc.sap_addr.itf = vcc->dev->number;
753 pvc.sap_addr.itf = vcc->dev->number; 763 pvc.sap_addr.vpi = vcc->vpi;
754 pvc.sap_addr.vpi = vcc->vpi; 764 pvc.sap_addr.vci = vcc->vci;
755 pvc.sap_addr.vci = vcc->vci; 765 return copy_to_user(optval, &pvc, sizeof(pvc)) ? -EFAULT : 0;
756 return copy_to_user(optval,&pvc,sizeof(pvc)) ? 766 }
757 -EFAULT : 0; 767 default:
758 } 768 if (level == SOL_SOCKET)
759 default: 769 return -EINVAL;
760 if (level == SOL_SOCKET) return -EINVAL;
761 break; 770 break;
762 } 771 }
763 if (!vcc->dev || !vcc->dev->ops->getsockopt) return -EINVAL; 772 if (!vcc->dev || !vcc->dev->ops->getsockopt)
773 return -EINVAL;
764 return vcc->dev->ops->getsockopt(vcc, level, optname, optval, len); 774 return vcc->dev->ops->getsockopt(vcc, level, optname, optval, len);
765} 775}
766 776
@@ -768,23 +778,27 @@ static int __init atm_init(void)
768{ 778{
769 int error; 779 int error;
770 780
771 if ((error = proto_register(&vcc_proto, 0)) < 0) 781 error = proto_register(&vcc_proto, 0);
782 if (error < 0)
772 goto out; 783 goto out;
773 784 error = atmpvc_init();
774 if ((error = atmpvc_init()) < 0) { 785 if (error < 0) {
775 printk(KERN_ERR "atmpvc_init() failed with %d\n", error); 786 pr_err("atmpvc_init() failed with %d\n", error);
776 goto out_unregister_vcc_proto; 787 goto out_unregister_vcc_proto;
777 } 788 }
778 if ((error = atmsvc_init()) < 0) { 789 error = atmsvc_init();
779 printk(KERN_ERR "atmsvc_init() failed with %d\n", error); 790 if (error < 0) {
791 pr_err("atmsvc_init() failed with %d\n", error);
780 goto out_atmpvc_exit; 792 goto out_atmpvc_exit;
781 } 793 }
782 if ((error = atm_proc_init()) < 0) { 794 error = atm_proc_init();
783 printk(KERN_ERR "atm_proc_init() failed with %d\n",error); 795 if (error < 0) {
796 pr_err("atm_proc_init() failed with %d\n", error);
784 goto out_atmsvc_exit; 797 goto out_atmsvc_exit;
785 } 798 }
786 if ((error = atm_sysfs_init()) < 0) { 799 error = atm_sysfs_init();
787 printk(KERN_ERR "atm_sysfs_init() failed with %d\n",error); 800 if (error < 0) {
801 pr_err("atm_sysfs_init() failed with %d\n", error);
788 goto out_atmproc_exit; 802 goto out_atmproc_exit;
789 } 803 }
790out: 804out:
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index 2ea40995dced..62dc8bfe6fe7 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -3,6 +3,7 @@
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4/* 2003 John Levon <levon@movementarian.org> */ 4/* 2003 John Levon <levon@movementarian.org> */
5 5
6#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
6 7
7#include <linux/module.h> 8#include <linux/module.h>
8#include <linux/kmod.h> 9#include <linux/kmod.h>
@@ -36,6 +37,7 @@ void register_atm_ioctl(struct atm_ioctl *ioctl)
36 list_add_tail(&ioctl->list, &ioctl_list); 37 list_add_tail(&ioctl->list, &ioctl_list);
37 mutex_unlock(&ioctl_mutex); 38 mutex_unlock(&ioctl_mutex);
38} 39}
40EXPORT_SYMBOL(register_atm_ioctl);
39 41
40void deregister_atm_ioctl(struct atm_ioctl *ioctl) 42void deregister_atm_ioctl(struct atm_ioctl *ioctl)
41{ 43{
@@ -43,129 +45,128 @@ void deregister_atm_ioctl(struct atm_ioctl *ioctl)
43 list_del(&ioctl->list); 45 list_del(&ioctl->list);
44 mutex_unlock(&ioctl_mutex); 46 mutex_unlock(&ioctl_mutex);
45} 47}
46
47EXPORT_SYMBOL(register_atm_ioctl);
48EXPORT_SYMBOL(deregister_atm_ioctl); 48EXPORT_SYMBOL(deregister_atm_ioctl);
49 49
50static int do_vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg, int compat) 50static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
51 unsigned long arg, int compat)
51{ 52{
52 struct sock *sk = sock->sk; 53 struct sock *sk = sock->sk;
53 struct atm_vcc *vcc; 54 struct atm_vcc *vcc;
54 int error; 55 int error;
55 struct list_head * pos; 56 struct list_head *pos;
56 void __user *argp = (void __user *)arg; 57 void __user *argp = (void __user *)arg;
57 58
58 vcc = ATM_SD(sock); 59 vcc = ATM_SD(sock);
59 switch (cmd) { 60 switch (cmd) {
60 case SIOCOUTQ: 61 case SIOCOUTQ:
61 if (sock->state != SS_CONNECTED || 62 if (sock->state != SS_CONNECTED ||
62 !test_bit(ATM_VF_READY, &vcc->flags)) { 63 !test_bit(ATM_VF_READY, &vcc->flags)) {
63 error = -EINVAL; 64 error = -EINVAL;
64 goto done; 65 goto done;
65 } 66 }
66 error = put_user(sk->sk_sndbuf - sk_wmem_alloc_get(sk), 67 error = put_user(sk->sk_sndbuf - sk_wmem_alloc_get(sk),
67 (int __user *) argp) ? -EFAULT : 0; 68 (int __user *)argp) ? -EFAULT : 0;
69 goto done;
70 case SIOCINQ:
71 {
72 struct sk_buff *skb;
73
74 if (sock->state != SS_CONNECTED) {
75 error = -EINVAL;
68 goto done; 76 goto done;
69 case SIOCINQ: 77 }
70 { 78 skb = skb_peek(&sk->sk_receive_queue);
71 struct sk_buff *skb; 79 error = put_user(skb ? skb->len : 0,
72 80 (int __user *)argp) ? -EFAULT : 0;
73 if (sock->state != SS_CONNECTED) { 81 goto done;
74 error = -EINVAL; 82 }
75 goto done; 83 case SIOCGSTAMP: /* borrowed from IP */
76 }
77 skb = skb_peek(&sk->sk_receive_queue);
78 error = put_user(skb ? skb->len : 0,
79 (int __user *)argp) ? -EFAULT : 0;
80 goto done;
81 }
82 case SIOCGSTAMP: /* borrowed from IP */
83#ifdef CONFIG_COMPAT 84#ifdef CONFIG_COMPAT
84 if (compat) 85 if (compat)
85 error = compat_sock_get_timestamp(sk, argp); 86 error = compat_sock_get_timestamp(sk, argp);
86 else 87 else
87#endif 88#endif
88 error = sock_get_timestamp(sk, argp); 89 error = sock_get_timestamp(sk, argp);
89 goto done; 90 goto done;
90 case SIOCGSTAMPNS: /* borrowed from IP */ 91 case SIOCGSTAMPNS: /* borrowed from IP */
91#ifdef CONFIG_COMPAT 92#ifdef CONFIG_COMPAT
92 if (compat) 93 if (compat)
93 error = compat_sock_get_timestampns(sk, argp); 94 error = compat_sock_get_timestampns(sk, argp);
94 else 95 else
95#endif 96#endif
96 error = sock_get_timestampns(sk, argp); 97 error = sock_get_timestampns(sk, argp);
98 goto done;
99 case ATM_SETSC:
100 if (net_ratelimit())
101 pr_warning("ATM_SETSC is obsolete; used by %s:%d\n",
102 current->comm, task_pid_nr(current));
103 error = 0;
104 goto done;
105 case ATMSIGD_CTRL:
106 if (!capable(CAP_NET_ADMIN)) {
107 error = -EPERM;
97 goto done; 108 goto done;
98 case ATM_SETSC: 109 }
99 if (net_ratelimit()) 110 /*
100 printk(KERN_WARNING "ATM_SETSC is obsolete; used by %s:%d\n", 111 * The user/kernel protocol for exchanging signalling
101 current->comm, task_pid_nr(current)); 112 * info uses kernel pointers as opaque references,
102 error = 0; 113 * so the holder of the file descriptor can scribble
114 * on the kernel... so we should make sure that we
115 * have the same privileges that /proc/kcore needs
116 */
117 if (!capable(CAP_SYS_RAWIO)) {
118 error = -EPERM;
103 goto done; 119 goto done;
104 case ATMSIGD_CTRL: 120 }
105 if (!capable(CAP_NET_ADMIN)) {
106 error = -EPERM;
107 goto done;
108 }
109 /*
110 * The user/kernel protocol for exchanging signalling
111 * info uses kernel pointers as opaque references,
112 * so the holder of the file descriptor can scribble
113 * on the kernel... so we should make sure that we
114 * have the same privileges that /proc/kcore needs
115 */
116 if (!capable(CAP_SYS_RAWIO)) {
117 error = -EPERM;
118 goto done;
119 }
120#ifdef CONFIG_COMPAT 121#ifdef CONFIG_COMPAT
121 /* WTF? I don't even want to _think_ about making this 122 /* WTF? I don't even want to _think_ about making this
122 work for 32-bit userspace. TBH I don't really want 123 work for 32-bit userspace. TBH I don't really want
123 to think about it at all. dwmw2. */ 124 to think about it at all. dwmw2. */
124 if (compat) { 125 if (compat) {
125 if (net_ratelimit()) 126 if (net_ratelimit())
126 printk(KERN_WARNING "32-bit task cannot be atmsigd\n"); 127 pr_warning("32-bit task cannot be atmsigd\n");
127 error = -EINVAL; 128 error = -EINVAL;
128 goto done; 129 goto done;
129 } 130 }
130#endif 131#endif
131 error = sigd_attach(vcc); 132 error = sigd_attach(vcc);
132 if (!error) 133 if (!error)
133 sock->state = SS_CONNECTED; 134 sock->state = SS_CONNECTED;
135 goto done;
136 case ATM_SETBACKEND:
137 case ATM_NEWBACKENDIF:
138 {
139 atm_backend_t backend;
140 error = get_user(backend, (atm_backend_t __user *)argp);
141 if (error)
134 goto done; 142 goto done;
135 case ATM_SETBACKEND: 143 switch (backend) {
136 case ATM_NEWBACKENDIF: 144 case ATM_BACKEND_PPP:
137 { 145 request_module("pppoatm");
138 atm_backend_t backend;
139 error = get_user(backend, (atm_backend_t __user *) argp);
140 if (error)
141 goto done;
142 switch (backend) {
143 case ATM_BACKEND_PPP:
144 request_module("pppoatm");
145 break;
146 case ATM_BACKEND_BR2684:
147 request_module("br2684");
148 break;
149 }
150 }
151 break;
152 case ATMMPC_CTRL:
153 case ATMMPC_DATA:
154 request_module("mpoa");
155 break;
156 case ATMARPD_CTRL:
157 request_module("clip");
158 break; 146 break;
159 case ATMLEC_CTRL: 147 case ATM_BACKEND_BR2684:
160 request_module("lec"); 148 request_module("br2684");
161 break; 149 break;
150 }
151 break;
152 }
153 case ATMMPC_CTRL:
154 case ATMMPC_DATA:
155 request_module("mpoa");
156 break;
157 case ATMARPD_CTRL:
158 request_module("clip");
159 break;
160 case ATMLEC_CTRL:
161 request_module("lec");
162 break;
162 } 163 }
163 164
164 error = -ENOIOCTLCMD; 165 error = -ENOIOCTLCMD;
165 166
166 mutex_lock(&ioctl_mutex); 167 mutex_lock(&ioctl_mutex);
167 list_for_each(pos, &ioctl_list) { 168 list_for_each(pos, &ioctl_list) {
168 struct atm_ioctl * ic = list_entry(pos, struct atm_ioctl, list); 169 struct atm_ioctl *ic = list_entry(pos, struct atm_ioctl, list);
169 if (try_module_get(ic->owner)) { 170 if (try_module_get(ic->owner)) {
170 error = ic->ioctl(sock, cmd, arg); 171 error = ic->ioctl(sock, cmd, arg);
171 module_put(ic->owner); 172 module_put(ic->owner);
@@ -184,7 +185,6 @@ done:
184 return error; 185 return error;
185} 186}
186 187
187
188int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 188int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
189{ 189{
190 return do_vcc_ioctl(sock, cmd, arg, 0); 190 return do_vcc_ioctl(sock, cmd, arg, 0);
@@ -287,8 +287,8 @@ static int do_atmif_sioc(struct socket *sock, unsigned int cmd,
287 sioc = compat_alloc_user_space(sizeof(*sioc)); 287 sioc = compat_alloc_user_space(sizeof(*sioc));
288 sioc32 = compat_ptr(arg); 288 sioc32 = compat_ptr(arg);
289 289
290 if (copy_in_user(&sioc->number, &sioc32->number, 2 * sizeof(int)) 290 if (copy_in_user(&sioc->number, &sioc32->number, 2 * sizeof(int)) ||
291 || get_user(data, &sioc32->arg)) 291 get_user(data, &sioc32->arg))
292 return -EFAULT; 292 return -EFAULT;
293 datap = compat_ptr(data); 293 datap = compat_ptr(data);
294 if (put_user(datap, &sioc->arg)) 294 if (put_user(datap, &sioc->arg))
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 42749b7b917c..5da5753157f9 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -4,6 +4,8 @@
4 * Marko Kiiskila <mkiiskila@yahoo.com> 4 * Marko Kiiskila <mkiiskila@yahoo.com>
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
8
7#include <linux/kernel.h> 9#include <linux/kernel.h>
8#include <linux/bitops.h> 10#include <linux/bitops.h>
9#include <linux/capability.h> 11#include <linux/capability.h>
@@ -16,7 +18,7 @@
16#include <linux/skbuff.h> 18#include <linux/skbuff.h>
17#include <linux/ip.h> 19#include <linux/ip.h>
18#include <asm/byteorder.h> 20#include <asm/byteorder.h>
19#include <asm/uaccess.h> 21#include <linux/uaccess.h>
20#include <net/arp.h> 22#include <net/arp.h>
21#include <net/dst.h> 23#include <net/dst.h>
22#include <linux/proc_fs.h> 24#include <linux/proc_fs.h>
@@ -85,17 +87,19 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
85 int is_rdesc, 87 int is_rdesc,
86 struct lec_arp_table **ret_entry); 88 struct lec_arp_table **ret_entry);
87static void lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr, 89static void lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
88 const unsigned char *atm_addr, unsigned long remoteflag, 90 const unsigned char *atm_addr,
91 unsigned long remoteflag,
89 unsigned int targetless_le_arp); 92 unsigned int targetless_le_arp);
90static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id); 93static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id);
91static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc); 94static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc);
92static void lec_set_flush_tran_id(struct lec_priv *priv, 95static void lec_set_flush_tran_id(struct lec_priv *priv,
93 const unsigned char *atm_addr, 96 const unsigned char *atm_addr,
94 unsigned long tran_id); 97 unsigned long tran_id);
95static void lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data, 98static void lec_vcc_added(struct lec_priv *priv,
99 const struct atmlec_ioc *ioc_data,
96 struct atm_vcc *vcc, 100 struct atm_vcc *vcc,
97 void (*old_push) (struct atm_vcc *vcc, 101 void (*old_push)(struct atm_vcc *vcc,
98 struct sk_buff *skb)); 102 struct sk_buff *skb));
99static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc); 103static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc);
100 104
101/* must be done under lec_arp_lock */ 105/* must be done under lec_arp_lock */
@@ -110,7 +114,6 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
110 kfree(entry); 114 kfree(entry);
111} 115}
112 116
113
114static struct lane2_ops lane2_ops = { 117static struct lane2_ops lane2_ops = {
115 lane2_resolve, /* resolve, spec 3.1.3 */ 118 lane2_resolve, /* resolve, spec 3.1.3 */
116 lane2_associate_req, /* associate_req, spec 3.1.4 */ 119 lane2_associate_req, /* associate_req, spec 3.1.4 */
@@ -148,7 +151,8 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
148 mesg = (struct atmlec_msg *)skb2->data; 151 mesg = (struct atmlec_msg *)skb2->data;
149 mesg->type = l_topology_change; 152 mesg->type = l_topology_change;
150 buff += 4; 153 buff += 4;
151 mesg->content.normal.flag = *buff & 0x01; /* 0x01 is topology change */ 154 mesg->content.normal.flag = *buff & 0x01;
155 /* 0x01 is topology change */
152 156
153 priv = netdev_priv(dev); 157 priv = netdev_priv(dev);
154 atm_force_charge(priv->lecd, skb2->truesize); 158 atm_force_charge(priv->lecd, skb2->truesize);
@@ -242,7 +246,7 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
242 246
243static void lec_tx_timeout(struct net_device *dev) 247static void lec_tx_timeout(struct net_device *dev)
244{ 248{
245 printk(KERN_INFO "%s: tx timeout\n", dev->name); 249 pr_info("%s\n", dev->name);
246 dev->trans_start = jiffies; 250 dev->trans_start = jiffies;
247 netif_wake_queue(dev); 251 netif_wake_queue(dev);
248} 252}
@@ -261,14 +265,10 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
261 unsigned char rdesc[ETH_ALEN]; /* Token Ring route descriptor */ 265 unsigned char rdesc[ETH_ALEN]; /* Token Ring route descriptor */
262#endif 266#endif
263 int is_rdesc; 267 int is_rdesc;
264#if DUMP_PACKETS > 0
265 char buf[300];
266 int i = 0;
267#endif /* DUMP_PACKETS >0 */
268 268
269 pr_debug("lec_start_xmit called\n"); 269 pr_debug("called\n");
270 if (!priv->lecd) { 270 if (!priv->lecd) {
271 printk("%s:No lecd attached\n", dev->name); 271 pr_info("%s:No lecd attached\n", dev->name);
272 dev->stats.tx_errors++; 272 dev->stats.tx_errors++;
273 netif_stop_queue(dev); 273 netif_stop_queue(dev);
274 kfree_skb(skb); 274 kfree_skb(skb);
@@ -276,8 +276,8 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
276 } 276 }
277 277
278 pr_debug("skbuff head:%lx data:%lx tail:%lx end:%lx\n", 278 pr_debug("skbuff head:%lx data:%lx tail:%lx end:%lx\n",
279 (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb), 279 (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb),
280 (long)skb_end_pointer(skb)); 280 (long)skb_end_pointer(skb));
281#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) 281#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
282 if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0) 282 if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0)
283 lec_handle_bridge(skb, dev); 283 lec_handle_bridge(skb, dev);
@@ -285,8 +285,7 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
285 285
286 /* Make sure we have room for lec_id */ 286 /* Make sure we have room for lec_id */
287 if (skb_headroom(skb) < 2) { 287 if (skb_headroom(skb) < 2) {
288 288 pr_debug("reallocating skb\n");
289 pr_debug("lec_start_xmit: reallocating skb\n");
290 skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN); 289 skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
291 kfree_skb(skb); 290 kfree_skb(skb);
292 if (skb2 == NULL) 291 if (skb2 == NULL)
@@ -313,23 +312,17 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
313 } 312 }
314#endif 313#endif
315 314
316#if DUMP_PACKETS > 0
317 printk("%s: send datalen:%ld lecid:%4.4x\n", dev->name,
318 skb->len, priv->lecid);
319#if DUMP_PACKETS >= 2 315#if DUMP_PACKETS >= 2
320 for (i = 0; i < skb->len && i < 99; i++) { 316#define MAX_DUMP_SKB 99
321 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
322 }
323#elif DUMP_PACKETS >= 1 317#elif DUMP_PACKETS >= 1
324 for (i = 0; i < skb->len && i < 30; i++) { 318#define MAX_DUMP_SKB 30
325 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]); 319#endif
326 } 320#if DUMP_PACKETS >= 1
321 printk(KERN_DEBUG "%s: send datalen:%ld lecid:%4.4x\n",
322 dev->name, skb->len, priv->lecid);
323 print_hex_dump(KERN_DEBUG, "", DUMP_OFFSET, 16, 1,
324 skb->data, min(skb->len, MAX_DUMP_SKB), true);
327#endif /* DUMP_PACKETS >= 1 */ 325#endif /* DUMP_PACKETS >= 1 */
328 if (i == skb->len)
329 printk("%s\n", buf);
330 else
331 printk("%s...\n", buf);
332#endif /* DUMP_PACKETS > 0 */
333 326
334 /* Minimum ethernet-frame size */ 327 /* Minimum ethernet-frame size */
335#ifdef CONFIG_TR 328#ifdef CONFIG_TR
@@ -367,31 +360,28 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
367#endif 360#endif
368 entry = NULL; 361 entry = NULL;
369 vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry); 362 vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry);
370 pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n", dev->name, 363 pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n",
371 vcc, vcc ? vcc->flags : 0, entry); 364 dev->name, vcc, vcc ? vcc->flags : 0, entry);
372 if (!vcc || !test_bit(ATM_VF_READY, &vcc->flags)) { 365 if (!vcc || !test_bit(ATM_VF_READY, &vcc->flags)) {
373 if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) { 366 if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) {
374 pr_debug("%s:lec_start_xmit: queuing packet, ", 367 pr_debug("%s:queuing packet, MAC address %pM\n",
375 dev->name); 368 dev->name, lec_h->h_dest);
376 pr_debug("MAC address %pM\n", lec_h->h_dest);
377 skb_queue_tail(&entry->tx_wait, skb); 369 skb_queue_tail(&entry->tx_wait, skb);
378 } else { 370 } else {
379 pr_debug 371 pr_debug("%s:tx queue full or no arp entry, dropping, MAC address: %pM\n",
380 ("%s:lec_start_xmit: tx queue full or no arp entry, dropping, ", 372 dev->name, lec_h->h_dest);
381 dev->name);
382 pr_debug("MAC address %pM\n", lec_h->h_dest);
383 dev->stats.tx_dropped++; 373 dev->stats.tx_dropped++;
384 dev_kfree_skb(skb); 374 dev_kfree_skb(skb);
385 } 375 }
386 goto out; 376 goto out;
387 } 377 }
388#if DUMP_PACKETS > 0 378#if DUMP_PACKETS > 0
389 printk("%s:sending to vpi:%d vci:%d\n", dev->name, vcc->vpi, vcc->vci); 379 printk(KERN_DEBUG "%s:sending to vpi:%d vci:%d\n",
380 dev->name, vcc->vpi, vcc->vci);
390#endif /* DUMP_PACKETS > 0 */ 381#endif /* DUMP_PACKETS > 0 */
391 382
392 while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) { 383 while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) {
393 pr_debug("lec.c: emptying tx queue, "); 384 pr_debug("emptying tx queue, MAC address %pM\n", lec_h->h_dest);
394 pr_debug("MAC address %pM\n", lec_h->h_dest);
395 lec_send(vcc, skb2); 385 lec_send(vcc, skb2);
396 } 386 }
397 387
@@ -444,14 +434,12 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
444 pr_debug("%s: msg from zeppelin:%d\n", dev->name, mesg->type); 434 pr_debug("%s: msg from zeppelin:%d\n", dev->name, mesg->type);
445 switch (mesg->type) { 435 switch (mesg->type) {
446 case l_set_mac_addr: 436 case l_set_mac_addr:
447 for (i = 0; i < 6; i++) { 437 for (i = 0; i < 6; i++)
448 dev->dev_addr[i] = mesg->content.normal.mac_addr[i]; 438 dev->dev_addr[i] = mesg->content.normal.mac_addr[i];
449 }
450 break; 439 break;
451 case l_del_mac_addr: 440 case l_del_mac_addr:
452 for (i = 0; i < 6; i++) { 441 for (i = 0; i < 6; i++)
453 dev->dev_addr[i] = 0; 442 dev->dev_addr[i] = 0;
454 }
455 break; 443 break;
456 case l_addr_delete: 444 case l_addr_delete:
457 lec_addr_delete(priv, mesg->content.normal.atm_addr, 445 lec_addr_delete(priv, mesg->content.normal.atm_addr,
@@ -477,10 +465,10 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
477 mesg->content.normal.atm_addr, 465 mesg->content.normal.atm_addr,
478 mesg->content.normal.flag, 466 mesg->content.normal.flag,
479 mesg->content.normal.targetless_le_arp); 467 mesg->content.normal.targetless_le_arp);
480 pr_debug("lec: in l_arp_update\n"); 468 pr_debug("in l_arp_update\n");
481 if (mesg->sizeoftlvs != 0) { /* LANE2 3.1.5 */ 469 if (mesg->sizeoftlvs != 0) { /* LANE2 3.1.5 */
482 pr_debug("lec: LANE2 3.1.5, got tlvs, size %d\n", 470 pr_debug("LANE2 3.1.5, got tlvs, size %d\n",
483 mesg->sizeoftlvs); 471 mesg->sizeoftlvs);
484 lane2_associate_ind(dev, mesg->content.normal.mac_addr, 472 lane2_associate_ind(dev, mesg->content.normal.mac_addr,
485 tmp, mesg->sizeoftlvs); 473 tmp, mesg->sizeoftlvs);
486 } 474 }
@@ -499,13 +487,14 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
499 priv->flush_timeout = (mesg->content.config.flush_timeout * HZ); 487 priv->flush_timeout = (mesg->content.config.flush_timeout * HZ);
500 priv->path_switching_delay = 488 priv->path_switching_delay =
501 (mesg->content.config.path_switching_delay * HZ); 489 (mesg->content.config.path_switching_delay * HZ);
502 priv->lane_version = mesg->content.config.lane_version; /* LANE2 */ 490 priv->lane_version = mesg->content.config.lane_version;
491 /* LANE2 */
503 priv->lane2_ops = NULL; 492 priv->lane2_ops = NULL;
504 if (priv->lane_version > 1) 493 if (priv->lane_version > 1)
505 priv->lane2_ops = &lane2_ops; 494 priv->lane2_ops = &lane2_ops;
506 if (dev_set_mtu(dev, mesg->content.config.mtu)) 495 if (dev_set_mtu(dev, mesg->content.config.mtu))
507 printk("%s: change_mtu to %d failed\n", dev->name, 496 pr_info("%s: change_mtu to %d failed\n",
508 mesg->content.config.mtu); 497 dev->name, mesg->content.config.mtu);
509 priv->is_proxy = mesg->content.config.is_proxy; 498 priv->is_proxy = mesg->content.config.is_proxy;
510 break; 499 break;
511 case l_flush_tran_id: 500 case l_flush_tran_id:
@@ -518,40 +507,35 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
518 break; 507 break;
519 case l_should_bridge: 508 case l_should_bridge:
520#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) 509#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
521 { 510 {
522 pr_debug("%s: bridge zeppelin asks about %pM\n", 511 pr_debug("%s: bridge zeppelin asks about %pM\n",
523 dev->name, mesg->content.proxy.mac_addr); 512 dev->name, mesg->content.proxy.mac_addr);
524 513
525 if (br_fdb_test_addr_hook == NULL) 514 if (br_fdb_test_addr_hook == NULL)
526 break; 515 break;
527 516
528 if (br_fdb_test_addr_hook(dev, 517 if (br_fdb_test_addr_hook(dev, mesg->content.proxy.mac_addr)) {
529 mesg->content.proxy.mac_addr)) { 518 /* hit from bridge table, send LE_ARP_RESPONSE */
530 /* hit from bridge table, send LE_ARP_RESPONSE */ 519 struct sk_buff *skb2;
531 struct sk_buff *skb2; 520 struct sock *sk;
532 struct sock *sk; 521
533 522 pr_debug("%s: entry found, responding to zeppelin\n",
534 pr_debug 523 dev->name);
535 ("%s: entry found, responding to zeppelin\n", 524 skb2 = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
536 dev->name); 525 if (skb2 == NULL)
537 skb2 = 526 break;
538 alloc_skb(sizeof(struct atmlec_msg), 527 skb2->len = sizeof(struct atmlec_msg);
539 GFP_ATOMIC); 528 skb_copy_to_linear_data(skb2, mesg, sizeof(*mesg));
540 if (skb2 == NULL) 529 atm_force_charge(priv->lecd, skb2->truesize);
541 break; 530 sk = sk_atm(priv->lecd);
542 skb2->len = sizeof(struct atmlec_msg); 531 skb_queue_tail(&sk->sk_receive_queue, skb2);
543 skb_copy_to_linear_data(skb2, mesg, 532 sk->sk_data_ready(sk, skb2->len);
544 sizeof(*mesg));
545 atm_force_charge(priv->lecd, skb2->truesize);
546 sk = sk_atm(priv->lecd);
547 skb_queue_tail(&sk->sk_receive_queue, skb2);
548 sk->sk_data_ready(sk, skb2->len);
549 }
550 } 533 }
534 }
551#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ 535#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
552 break; 536 break;
553 default: 537 default:
554 printk("%s: Unknown message type %d\n", dev->name, mesg->type); 538 pr_info("%s: Unknown message type %d\n", dev->name, mesg->type);
555 dev_kfree_skb(skb); 539 dev_kfree_skb(skb);
556 return -EINVAL; 540 return -EINVAL;
557 } 541 }
@@ -572,14 +556,13 @@ static void lec_atm_close(struct atm_vcc *vcc)
572 lec_arp_destroy(priv); 556 lec_arp_destroy(priv);
573 557
574 if (skb_peek(&sk_atm(vcc)->sk_receive_queue)) 558 if (skb_peek(&sk_atm(vcc)->sk_receive_queue))
575 printk("%s lec_atm_close: closing with messages pending\n", 559 pr_info("%s closing with messages pending\n", dev->name);
576 dev->name); 560 while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue))) {
577 while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue)) != NULL) {
578 atm_return(vcc, skb->truesize); 561 atm_return(vcc, skb->truesize);
579 dev_kfree_skb(skb); 562 dev_kfree_skb(skb);
580 } 563 }
581 564
582 printk("%s: Shut down!\n", dev->name); 565 pr_info("%s: Shut down!\n", dev->name);
583 module_put(THIS_MODULE); 566 module_put(THIS_MODULE);
584} 567}
585 568
@@ -608,9 +591,8 @@ send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
608 struct sk_buff *skb; 591 struct sk_buff *skb;
609 struct atmlec_msg *mesg; 592 struct atmlec_msg *mesg;
610 593
611 if (!priv || !priv->lecd) { 594 if (!priv || !priv->lecd)
612 return -1; 595 return -1;
613 }
614 skb = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC); 596 skb = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
615 if (!skb) 597 if (!skb)
616 return -1; 598 return -1;
@@ -633,7 +615,7 @@ send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
633 sk->sk_data_ready(sk, skb->len); 615 sk->sk_data_ready(sk, skb->len);
634 616
635 if (data != NULL) { 617 if (data != NULL) {
636 pr_debug("lec: about to send %d bytes of data\n", data->len); 618 pr_debug("about to send %d bytes of data\n", data->len);
637 atm_force_charge(priv->lecd, data->truesize); 619 atm_force_charge(priv->lecd, data->truesize);
638 skb_queue_tail(&sk->sk_receive_queue, data); 620 skb_queue_tail(&sk->sk_receive_queue, data);
639 sk->sk_data_ready(sk, skb->len); 621 sk->sk_data_ready(sk, skb->len);
@@ -691,36 +673,28 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
691 struct net_device *dev = (struct net_device *)vcc->proto_data; 673 struct net_device *dev = (struct net_device *)vcc->proto_data;
692 struct lec_priv *priv = netdev_priv(dev); 674 struct lec_priv *priv = netdev_priv(dev);
693 675
694#if DUMP_PACKETS >0 676#if DUMP_PACKETS > 0
695 int i = 0; 677 printk(KERN_DEBUG "%s: vcc vpi:%d vci:%d\n",
696 char buf[300]; 678 dev->name, vcc->vpi, vcc->vci);
697
698 printk("%s: lec_push vcc vpi:%d vci:%d\n", dev->name,
699 vcc->vpi, vcc->vci);
700#endif 679#endif
701 if (!skb) { 680 if (!skb) {
702 pr_debug("%s: null skb\n", dev->name); 681 pr_debug("%s: null skb\n", dev->name);
703 lec_vcc_close(priv, vcc); 682 lec_vcc_close(priv, vcc);
704 return; 683 return;
705 } 684 }
706#if DUMP_PACKETS > 0
707 printk("%s: rcv datalen:%ld lecid:%4.4x\n", dev->name,
708 skb->len, priv->lecid);
709#if DUMP_PACKETS >= 2 685#if DUMP_PACKETS >= 2
710 for (i = 0; i < skb->len && i < 99; i++) { 686#define MAX_SKB_DUMP 99
711 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
712 }
713#elif DUMP_PACKETS >= 1 687#elif DUMP_PACKETS >= 1
714 for (i = 0; i < skb->len && i < 30; i++) { 688#define MAX_SKB_DUMP 30
715 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]); 689#endif
716 } 690#if DUMP_PACKETS > 0
717#endif /* DUMP_PACKETS >= 1 */ 691 printk(KERN_DEBUG "%s: rcv datalen:%ld lecid:%4.4x\n",
718 if (i == skb->len) 692 dev->name, skb->len, priv->lecid);
719 printk("%s\n", buf); 693 print_hex_dump(KERN_DEBUG, "", DUMP_OFFSET, 16, 1,
720 else 694 skb->data, min(MAX_SKB_DUMP, skb->len), true);
721 printk("%s...\n", buf);
722#endif /* DUMP_PACKETS > 0 */ 695#endif /* DUMP_PACKETS > 0 */
723 if (memcmp(skb->data, lec_ctrl_magic, 4) == 0) { /* Control frame, to daemon */ 696 if (memcmp(skb->data, lec_ctrl_magic, 4) == 0) {
697 /* Control frame, to daemon */
724 struct sock *sk = sk_atm(vcc); 698 struct sock *sk = sk_atm(vcc);
725 699
726 pr_debug("%s: To daemon\n", dev->name); 700 pr_debug("%s: To daemon\n", dev->name);
@@ -778,9 +752,8 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
778 dev_kfree_skb(skb); 752 dev_kfree_skb(skb);
779 return; 753 return;
780 } 754 }
781 if (!hlist_empty(&priv->lec_arp_empty_ones)) { 755 if (!hlist_empty(&priv->lec_arp_empty_ones))
782 lec_arp_check_empties(priv, vcc, skb); 756 lec_arp_check_empties(priv, vcc, skb);
783 }
784 skb_pull(skb, 2); /* skip lec_id */ 757 skb_pull(skb, 2); /* skip lec_id */
785#ifdef CONFIG_TR 758#ifdef CONFIG_TR
786 if (priv->is_trdev) 759 if (priv->is_trdev)
@@ -801,7 +774,7 @@ static void lec_pop(struct atm_vcc *vcc, struct sk_buff *skb)
801 struct net_device *dev = skb->dev; 774 struct net_device *dev = skb->dev;
802 775
803 if (vpriv == NULL) { 776 if (vpriv == NULL) {
804 printk("lec_pop(): vpriv = NULL!?!?!?\n"); 777 pr_info("vpriv = NULL!?!?!?\n");
805 return; 778 return;
806 } 779 }
807 780
@@ -822,15 +795,13 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
822 795
823 /* Lecd must be up in this case */ 796 /* Lecd must be up in this case */
824 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc)); 797 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
825 if (bytes_left != 0) { 798 if (bytes_left != 0)
826 printk 799 pr_info("copy from user failed for %d bytes\n", bytes_left);
827 ("lec: lec_vcc_attach, copy from user failed for %d bytes\n",
828 bytes_left);
829 }
830 if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF || 800 if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF ||
831 !dev_lec[ioc_data.dev_num]) 801 !dev_lec[ioc_data.dev_num])
832 return -EINVAL; 802 return -EINVAL;
833 if (!(vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL))) 803 vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
804 if (!vpriv)
834 return -ENOMEM; 805 return -ENOMEM;
835 vpriv->xoff = 0; 806 vpriv->xoff = 0;
836 vpriv->old_pop = vcc->pop; 807 vpriv->old_pop = vcc->pop;
@@ -921,9 +892,8 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
921 priv->flush_timeout = (4 * HZ); 892 priv->flush_timeout = (4 * HZ);
922 priv->path_switching_delay = (6 * HZ); 893 priv->path_switching_delay = (6 * HZ);
923 894
924 if (dev_lec[i]->flags & IFF_UP) { 895 if (dev_lec[i]->flags & IFF_UP)
925 netif_start_queue(dev_lec[i]); 896 netif_start_queue(dev_lec[i]);
926 }
927 __module_get(THIS_MODULE); 897 __module_get(THIS_MODULE);
928 return i; 898 return i;
929} 899}
@@ -1125,7 +1095,9 @@ static int lec_seq_show(struct seq_file *seq, void *v)
1125 else { 1095 else {
1126 struct lec_state *state = seq->private; 1096 struct lec_state *state = seq->private;
1127 struct net_device *dev = state->dev; 1097 struct net_device *dev = state->dev;
1128 struct lec_arp_table *entry = hlist_entry(state->node, struct lec_arp_table, next); 1098 struct lec_arp_table *entry = hlist_entry(state->node,
1099 struct lec_arp_table,
1100 next);
1129 1101
1130 seq_printf(seq, "%s ", dev->name); 1102 seq_printf(seq, "%s ", dev->name);
1131 lec_info(seq, entry); 1103 lec_info(seq, entry);
@@ -1199,13 +1171,13 @@ static int __init lane_module_init(void)
1199 1171
1200 p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops); 1172 p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops);
1201 if (!p) { 1173 if (!p) {
1202 printk(KERN_ERR "Unable to initialize /proc/net/atm/lec\n"); 1174 pr_err("Unable to initialize /proc/net/atm/lec\n");
1203 return -ENOMEM; 1175 return -ENOMEM;
1204 } 1176 }
1205#endif 1177#endif
1206 1178
1207 register_atm_ioctl(&lane_ioctl_ops); 1179 register_atm_ioctl(&lane_ioctl_ops);
1208 printk("lec.c: " __DATE__ " " __TIME__ " initialized\n"); 1180 pr_info("lec.c: " __DATE__ " " __TIME__ " initialized\n");
1209 return 0; 1181 return 0;
1210} 1182}
1211 1183
@@ -1294,13 +1266,13 @@ static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
1294 struct lec_priv *priv = netdev_priv(dev); 1266 struct lec_priv *priv = netdev_priv(dev);
1295 1267
1296 if (compare_ether_addr(lan_dst, dev->dev_addr)) 1268 if (compare_ether_addr(lan_dst, dev->dev_addr))
1297 return (0); /* not our mac address */ 1269 return 0; /* not our mac address */
1298 1270
1299 kfree(priv->tlvs); /* NULL if there was no previous association */ 1271 kfree(priv->tlvs); /* NULL if there was no previous association */
1300 1272
1301 priv->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL); 1273 priv->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL);
1302 if (priv->tlvs == NULL) 1274 if (priv->tlvs == NULL)
1303 return (0); 1275 return 0;
1304 priv->sizeoftlvs = sizeoftlvs; 1276 priv->sizeoftlvs = sizeoftlvs;
1305 1277
1306 skb = alloc_skb(sizeoftlvs, GFP_ATOMIC); 1278 skb = alloc_skb(sizeoftlvs, GFP_ATOMIC);
@@ -1310,12 +1282,12 @@ static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
1310 skb_copy_to_linear_data(skb, tlvs, sizeoftlvs); 1282 skb_copy_to_linear_data(skb, tlvs, sizeoftlvs);
1311 retval = send_to_lecd(priv, l_associate_req, NULL, NULL, skb); 1283 retval = send_to_lecd(priv, l_associate_req, NULL, NULL, skb);
1312 if (retval != 0) 1284 if (retval != 0)
1313 printk("lec.c: lane2_associate_req() failed\n"); 1285 pr_info("lec.c: lane2_associate_req() failed\n");
1314 /* 1286 /*
1315 * If the previous association has changed we must 1287 * If the previous association has changed we must
1316 * somehow notify other LANE entities about the change 1288 * somehow notify other LANE entities about the change
1317 */ 1289 */
1318 return (1); 1290 return 1;
1319} 1291}
1320 1292
1321/* 1293/*
@@ -1348,12 +1320,12 @@ static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr,
1348 entry->sizeoftlvs = sizeoftlvs; 1320 entry->sizeoftlvs = sizeoftlvs;
1349#endif 1321#endif
1350#if 0 1322#if 0
1351 printk("lec.c: lane2_associate_ind()\n"); 1323 pr_info("\n");
1352 printk("dump of tlvs, sizeoftlvs=%d\n", sizeoftlvs); 1324 pr_info("dump of tlvs, sizeoftlvs=%d\n", sizeoftlvs);
1353 while (i < sizeoftlvs) 1325 while (i < sizeoftlvs)
1354 printk("%02x ", tlvs[i++]); 1326 pr_cont("%02x ", tlvs[i++]);
1355 1327
1356 printk("\n"); 1328 pr_cont("\n");
1357#endif 1329#endif
1358 1330
1359 /* tell MPOA about the TLVs we saw */ 1331 /* tell MPOA about the TLVs we saw */
@@ -1373,15 +1345,15 @@ static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr,
1373 1345
1374#include <linux/types.h> 1346#include <linux/types.h>
1375#include <linux/timer.h> 1347#include <linux/timer.h>
1376#include <asm/param.h> 1348#include <linux/param.h>
1377#include <asm/atomic.h> 1349#include <asm/atomic.h>
1378#include <linux/inetdevice.h> 1350#include <linux/inetdevice.h>
1379#include <net/route.h> 1351#include <net/route.h>
1380 1352
1381#if 0 1353#if 0
1382#define pr_debug(format,args...) 1354#define pr_debug(format, args...)
1383/* 1355/*
1384#define pr_debug printk 1356 #define pr_debug printk
1385*/ 1357*/
1386#endif 1358#endif
1387#define DEBUG_ARP_TABLE 0 1359#define DEBUG_ARP_TABLE 0
@@ -1395,7 +1367,7 @@ static void lec_arp_expire_arp(unsigned long data);
1395 * Arp table funcs 1367 * Arp table funcs
1396 */ 1368 */
1397 1369
1398#define HASH(ch) (ch & (LEC_ARP_TABLE_SIZE -1)) 1370#define HASH(ch) (ch & (LEC_ARP_TABLE_SIZE - 1))
1399 1371
1400/* 1372/*
1401 * Initialization of arp-cache 1373 * Initialization of arp-cache
@@ -1404,9 +1376,8 @@ static void lec_arp_init(struct lec_priv *priv)
1404{ 1376{
1405 unsigned short i; 1377 unsigned short i;
1406 1378
1407 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1379 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
1408 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]); 1380 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
1409 }
1410 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones); 1381 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
1411 INIT_HLIST_HEAD(&priv->lec_no_forward); 1382 INIT_HLIST_HEAD(&priv->lec_no_forward);
1412 INIT_HLIST_HEAD(&priv->mcast_fwds); 1383 INIT_HLIST_HEAD(&priv->mcast_fwds);
@@ -1450,10 +1421,7 @@ lec_arp_add(struct lec_priv *priv, struct lec_arp_table *entry)
1450 tmp = &priv->lec_arp_tables[HASH(entry->mac_addr[ETH_ALEN - 1])]; 1421 tmp = &priv->lec_arp_tables[HASH(entry->mac_addr[ETH_ALEN - 1])];
1451 hlist_add_head(&entry->next, tmp); 1422 hlist_add_head(&entry->next, tmp);
1452 1423
1453 pr_debug("LEC_ARP: Added entry:%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", 1424 pr_debug("Added entry:%pM\n", entry->mac_addr);
1454 0xff & entry->mac_addr[0], 0xff & entry->mac_addr[1],
1455 0xff & entry->mac_addr[2], 0xff & entry->mac_addr[3],
1456 0xff & entry->mac_addr[4], 0xff & entry->mac_addr[5]);
1457} 1425}
1458 1426
1459/* 1427/*
@@ -1466,20 +1434,23 @@ lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
1466 struct lec_arp_table *entry; 1434 struct lec_arp_table *entry;
1467 int i, remove_vcc = 1; 1435 int i, remove_vcc = 1;
1468 1436
1469 if (!to_remove) { 1437 if (!to_remove)
1470 return -1; 1438 return -1;
1471 }
1472 1439
1473 hlist_del(&to_remove->next); 1440 hlist_del(&to_remove->next);
1474 del_timer(&to_remove->timer); 1441 del_timer(&to_remove->timer);
1475 1442
1476 /* If this is the only MAC connected to this VCC, also tear down the VCC */ 1443 /*
1444 * If this is the only MAC connected to this VCC,
1445 * also tear down the VCC
1446 */
1477 if (to_remove->status >= ESI_FLUSH_PENDING) { 1447 if (to_remove->status >= ESI_FLUSH_PENDING) {
1478 /* 1448 /*
1479 * ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT 1449 * ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT
1480 */ 1450 */
1481 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1451 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1482 hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { 1452 hlist_for_each_entry(entry, node,
1453 &priv->lec_arp_tables[i], next) {
1483 if (memcmp(to_remove->atm_addr, 1454 if (memcmp(to_remove->atm_addr,
1484 entry->atm_addr, ATM_ESA_LEN) == 0) { 1455 entry->atm_addr, ATM_ESA_LEN) == 0) {
1485 remove_vcc = 0; 1456 remove_vcc = 0;
@@ -1492,10 +1463,7 @@ lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
1492 } 1463 }
1493 skb_queue_purge(&to_remove->tx_wait); /* FIXME: good place for this? */ 1464 skb_queue_purge(&to_remove->tx_wait); /* FIXME: good place for this? */
1494 1465
1495 pr_debug("LEC_ARP: Removed entry:%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", 1466 pr_debug("Removed entry:%pM\n", to_remove->mac_addr);
1496 0xff & to_remove->mac_addr[0], 0xff & to_remove->mac_addr[1],
1497 0xff & to_remove->mac_addr[2], 0xff & to_remove->mac_addr[3],
1498 0xff & to_remove->mac_addr[4], 0xff & to_remove->mac_addr[5]);
1499 return 0; 1467 return 0;
1500} 1468}
1501 1469
@@ -1513,9 +1481,8 @@ static const char *get_status_string(unsigned char st)
1513 return "ESI_FLUSH_PENDING"; 1481 return "ESI_FLUSH_PENDING";
1514 case ESI_FORWARD_DIRECT: 1482 case ESI_FORWARD_DIRECT:
1515 return "ESI_FORWARD_DIRECT"; 1483 return "ESI_FORWARD_DIRECT";
1516 default:
1517 return "<UNKNOWN>";
1518 } 1484 }
1485 return "<UNKNOWN>";
1519} 1486}
1520 1487
1521static void dump_arp_table(struct lec_priv *priv) 1488static void dump_arp_table(struct lec_priv *priv)
@@ -1525,18 +1492,15 @@ static void dump_arp_table(struct lec_priv *priv)
1525 char buf[256]; 1492 char buf[256];
1526 int i, j, offset; 1493 int i, j, offset;
1527 1494
1528 printk("Dump %p:\n", priv); 1495 pr_info("Dump %p:\n", priv);
1529 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1496 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1530 hlist_for_each_entry(rulla, node, &priv->lec_arp_tables[i], next) { 1497 hlist_for_each_entry(rulla, node,
1498 &priv->lec_arp_tables[i], next) {
1531 offset = 0; 1499 offset = 0;
1532 offset += sprintf(buf, "%d: %p\n", i, rulla); 1500 offset += sprintf(buf, "%d: %p\n", i, rulla);
1533 offset += sprintf(buf + offset, "Mac:"); 1501 offset += sprintf(buf + offset, "Mac: %pM",
1534 for (j = 0; j < ETH_ALEN; j++) { 1502 rulla->mac_addr);
1535 offset += sprintf(buf + offset, 1503 offset += sprintf(buf + offset, " Atm:");
1536 "%2.2x ",
1537 rulla->mac_addr[j] & 0xff);
1538 }
1539 offset += sprintf(buf + offset, "Atm:");
1540 for (j = 0; j < ATM_ESA_LEN; j++) { 1504 for (j = 0; j < ATM_ESA_LEN; j++) {
1541 offset += sprintf(buf + offset, 1505 offset += sprintf(buf + offset,
1542 "%2.2x ", 1506 "%2.2x ",
@@ -1556,20 +1520,16 @@ static void dump_arp_table(struct lec_priv *priv)
1556 "Flags:%x, Packets_flooded:%x, Status: %s ", 1520 "Flags:%x, Packets_flooded:%x, Status: %s ",
1557 rulla->flags, rulla->packets_flooded, 1521 rulla->flags, rulla->packets_flooded,
1558 get_status_string(rulla->status)); 1522 get_status_string(rulla->status));
1559 printk("%s\n", buf); 1523 pr_info("%s\n", buf);
1560 } 1524 }
1561 } 1525 }
1562 1526
1563 if (!hlist_empty(&priv->lec_no_forward)) 1527 if (!hlist_empty(&priv->lec_no_forward))
1564 printk("No forward\n"); 1528 pr_info("No forward\n");
1565 hlist_for_each_entry(rulla, node, &priv->lec_no_forward, next) { 1529 hlist_for_each_entry(rulla, node, &priv->lec_no_forward, next) {
1566 offset = 0; 1530 offset = 0;
1567 offset += sprintf(buf + offset, "Mac:"); 1531 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
1568 for (j = 0; j < ETH_ALEN; j++) { 1532 offset += sprintf(buf + offset, " Atm:");
1569 offset += sprintf(buf + offset, "%2.2x ",
1570 rulla->mac_addr[j] & 0xff);
1571 }
1572 offset += sprintf(buf + offset, "Atm:");
1573 for (j = 0; j < ATM_ESA_LEN; j++) { 1533 for (j = 0; j < ATM_ESA_LEN; j++) {
1574 offset += sprintf(buf + offset, "%2.2x ", 1534 offset += sprintf(buf + offset, "%2.2x ",
1575 rulla->atm_addr[j] & 0xff); 1535 rulla->atm_addr[j] & 0xff);
@@ -1586,19 +1546,15 @@ static void dump_arp_table(struct lec_priv *priv)
1586 "Flags:%x, Packets_flooded:%x, Status: %s ", 1546 "Flags:%x, Packets_flooded:%x, Status: %s ",
1587 rulla->flags, rulla->packets_flooded, 1547 rulla->flags, rulla->packets_flooded,
1588 get_status_string(rulla->status)); 1548 get_status_string(rulla->status));
1589 printk("%s\n", buf); 1549 pr_info("%s\n", buf);
1590 } 1550 }
1591 1551
1592 if (!hlist_empty(&priv->lec_arp_empty_ones)) 1552 if (!hlist_empty(&priv->lec_arp_empty_ones))
1593 printk("Empty ones\n"); 1553 pr_info("Empty ones\n");
1594 hlist_for_each_entry(rulla, node, &priv->lec_arp_empty_ones, next) { 1554 hlist_for_each_entry(rulla, node, &priv->lec_arp_empty_ones, next) {
1595 offset = 0; 1555 offset = 0;
1596 offset += sprintf(buf + offset, "Mac:"); 1556 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
1597 for (j = 0; j < ETH_ALEN; j++) { 1557 offset += sprintf(buf + offset, " Atm:");
1598 offset += sprintf(buf + offset, "%2.2x ",
1599 rulla->mac_addr[j] & 0xff);
1600 }
1601 offset += sprintf(buf + offset, "Atm:");
1602 for (j = 0; j < ATM_ESA_LEN; j++) { 1558 for (j = 0; j < ATM_ESA_LEN; j++) {
1603 offset += sprintf(buf + offset, "%2.2x ", 1559 offset += sprintf(buf + offset, "%2.2x ",
1604 rulla->atm_addr[j] & 0xff); 1560 rulla->atm_addr[j] & 0xff);
@@ -1615,19 +1571,15 @@ static void dump_arp_table(struct lec_priv *priv)
1615 "Flags:%x, Packets_flooded:%x, Status: %s ", 1571 "Flags:%x, Packets_flooded:%x, Status: %s ",
1616 rulla->flags, rulla->packets_flooded, 1572 rulla->flags, rulla->packets_flooded,
1617 get_status_string(rulla->status)); 1573 get_status_string(rulla->status));
1618 printk("%s", buf); 1574 pr_info("%s", buf);
1619 } 1575 }
1620 1576
1621 if (!hlist_empty(&priv->mcast_fwds)) 1577 if (!hlist_empty(&priv->mcast_fwds))
1622 printk("Multicast Forward VCCs\n"); 1578 pr_info("Multicast Forward VCCs\n");
1623 hlist_for_each_entry(rulla, node, &priv->mcast_fwds, next) { 1579 hlist_for_each_entry(rulla, node, &priv->mcast_fwds, next) {
1624 offset = 0; 1580 offset = 0;
1625 offset += sprintf(buf + offset, "Mac:"); 1581 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
1626 for (j = 0; j < ETH_ALEN; j++) { 1582 offset += sprintf(buf + offset, " Atm:");
1627 offset += sprintf(buf + offset, "%2.2x ",
1628 rulla->mac_addr[j] & 0xff);
1629 }
1630 offset += sprintf(buf + offset, "Atm:");
1631 for (j = 0; j < ATM_ESA_LEN; j++) { 1583 for (j = 0; j < ATM_ESA_LEN; j++) {
1632 offset += sprintf(buf + offset, "%2.2x ", 1584 offset += sprintf(buf + offset, "%2.2x ",
1633 rulla->atm_addr[j] & 0xff); 1585 rulla->atm_addr[j] & 0xff);
@@ -1644,7 +1596,7 @@ static void dump_arp_table(struct lec_priv *priv)
1644 "Flags:%x, Packets_flooded:%x, Status: %s ", 1596 "Flags:%x, Packets_flooded:%x, Status: %s ",
1645 rulla->flags, rulla->packets_flooded, 1597 rulla->flags, rulla->packets_flooded,
1646 get_status_string(rulla->status)); 1598 get_status_string(rulla->status));
1647 printk("%s\n", buf); 1599 pr_info("%s\n", buf);
1648 } 1600 }
1649 1601
1650} 1602}
@@ -1670,14 +1622,16 @@ static void lec_arp_destroy(struct lec_priv *priv)
1670 1622
1671 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1623 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1672 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1624 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1673 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { 1625 hlist_for_each_entry_safe(entry, node, next,
1626 &priv->lec_arp_tables[i], next) {
1674 lec_arp_remove(priv, entry); 1627 lec_arp_remove(priv, entry);
1675 lec_arp_put(entry); 1628 lec_arp_put(entry);
1676 } 1629 }
1677 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]); 1630 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
1678 } 1631 }
1679 1632
1680 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { 1633 hlist_for_each_entry_safe(entry, node, next,
1634 &priv->lec_arp_empty_ones, next) {
1681 del_timer_sync(&entry->timer); 1635 del_timer_sync(&entry->timer);
1682 lec_arp_clear_vccs(entry); 1636 lec_arp_clear_vccs(entry);
1683 hlist_del(&entry->next); 1637 hlist_del(&entry->next);
@@ -1685,7 +1639,8 @@ static void lec_arp_destroy(struct lec_priv *priv)
1685 } 1639 }
1686 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones); 1640 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
1687 1641
1688 hlist_for_each_entry_safe(entry, node, next, &priv->lec_no_forward, next) { 1642 hlist_for_each_entry_safe(entry, node, next,
1643 &priv->lec_no_forward, next) {
1689 del_timer_sync(&entry->timer); 1644 del_timer_sync(&entry->timer);
1690 lec_arp_clear_vccs(entry); 1645 lec_arp_clear_vccs(entry);
1691 hlist_del(&entry->next); 1646 hlist_del(&entry->next);
@@ -1714,15 +1669,12 @@ static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
1714 struct hlist_head *head; 1669 struct hlist_head *head;
1715 struct lec_arp_table *entry; 1670 struct lec_arp_table *entry;
1716 1671
1717 pr_debug("LEC_ARP: lec_arp_find :%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", 1672 pr_debug("%pM\n", mac_addr);
1718 mac_addr[0] & 0xff, mac_addr[1] & 0xff, mac_addr[2] & 0xff,
1719 mac_addr[3] & 0xff, mac_addr[4] & 0xff, mac_addr[5] & 0xff);
1720 1673
1721 head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])]; 1674 head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])];
1722 hlist_for_each_entry(entry, node, head, next) { 1675 hlist_for_each_entry(entry, node, head, next) {
1723 if (!compare_ether_addr(mac_addr, entry->mac_addr)) { 1676 if (!compare_ether_addr(mac_addr, entry->mac_addr))
1724 return entry; 1677 return entry;
1725 }
1726 } 1678 }
1727 return NULL; 1679 return NULL;
1728} 1680}
@@ -1734,7 +1686,7 @@ static struct lec_arp_table *make_entry(struct lec_priv *priv,
1734 1686
1735 to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC); 1687 to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC);
1736 if (!to_return) { 1688 if (!to_return) {
1737 printk("LEC: Arp entry kmalloc failed\n"); 1689 pr_info("LEC: Arp entry kmalloc failed\n");
1738 return NULL; 1690 return NULL;
1739 } 1691 }
1740 memcpy(to_return->mac_addr, mac_addr, ETH_ALEN); 1692 memcpy(to_return->mac_addr, mac_addr, ETH_ALEN);
@@ -1755,7 +1707,7 @@ static void lec_arp_expire_arp(unsigned long data)
1755 1707
1756 entry = (struct lec_arp_table *)data; 1708 entry = (struct lec_arp_table *)data;
1757 1709
1758 pr_debug("lec_arp_expire_arp\n"); 1710 pr_debug("\n");
1759 if (entry->status == ESI_ARP_PENDING) { 1711 if (entry->status == ESI_ARP_PENDING) {
1760 if (entry->no_tries <= entry->priv->max_retry_count) { 1712 if (entry->no_tries <= entry->priv->max_retry_count) {
1761 if (entry->is_rdesc) 1713 if (entry->is_rdesc)
@@ -1779,10 +1731,10 @@ static void lec_arp_expire_vcc(unsigned long data)
1779 1731
1780 del_timer(&to_remove->timer); 1732 del_timer(&to_remove->timer);
1781 1733
1782 pr_debug("LEC_ARP %p %p: lec_arp_expire_vcc vpi:%d vci:%d\n", 1734 pr_debug("%p %p: vpi:%d vci:%d\n",
1783 to_remove, priv, 1735 to_remove, priv,
1784 to_remove->vcc ? to_remove->recv_vcc->vpi : 0, 1736 to_remove->vcc ? to_remove->recv_vcc->vpi : 0,
1785 to_remove->vcc ? to_remove->recv_vcc->vci : 0); 1737 to_remove->vcc ? to_remove->recv_vcc->vci : 0);
1786 1738
1787 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1739 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1788 hlist_del(&to_remove->next); 1740 hlist_del(&to_remove->next);
@@ -1792,6 +1744,50 @@ static void lec_arp_expire_vcc(unsigned long data)
1792 lec_arp_put(to_remove); 1744 lec_arp_put(to_remove);
1793} 1745}
1794 1746
1747static bool __lec_arp_check_expire(struct lec_arp_table *entry,
1748 unsigned long now,
1749 struct lec_priv *priv)
1750{
1751 unsigned long time_to_check;
1752
1753 if ((entry->flags) & LEC_REMOTE_FLAG && priv->topology_change)
1754 time_to_check = priv->forward_delay_time;
1755 else
1756 time_to_check = priv->aging_time;
1757
1758 pr_debug("About to expire: %lx - %lx > %lx\n",
1759 now, entry->last_used, time_to_check);
1760 if (time_after(now, entry->last_used + time_to_check) &&
1761 !(entry->flags & LEC_PERMANENT_FLAG) &&
1762 !(entry->mac_addr[0] & 0x01)) { /* LANE2: 7.1.20 */
1763 /* Remove entry */
1764 pr_debug("Entry timed out\n");
1765 lec_arp_remove(priv, entry);
1766 lec_arp_put(entry);
1767 } else {
1768 /* Something else */
1769 if ((entry->status == ESI_VC_PENDING ||
1770 entry->status == ESI_ARP_PENDING) &&
1771 time_after_eq(now, entry->timestamp +
1772 priv->max_unknown_frame_time)) {
1773 entry->timestamp = jiffies;
1774 entry->packets_flooded = 0;
1775 if (entry->status == ESI_VC_PENDING)
1776 send_to_lecd(priv, l_svc_setup,
1777 entry->mac_addr,
1778 entry->atm_addr,
1779 NULL);
1780 }
1781 if (entry->status == ESI_FLUSH_PENDING &&
1782 time_after_eq(now, entry->timestamp +
1783 priv->path_switching_delay)) {
1784 lec_arp_hold(entry);
1785 return true;
1786 }
1787 }
1788
1789 return false;
1790}
1795/* 1791/*
1796 * Expire entries. 1792 * Expire entries.
1797 * 1. Re-set timer 1793 * 1. Re-set timer
@@ -1816,62 +1812,28 @@ static void lec_arp_check_expire(struct work_struct *work)
1816 struct hlist_node *node, *next; 1812 struct hlist_node *node, *next;
1817 struct lec_arp_table *entry; 1813 struct lec_arp_table *entry;
1818 unsigned long now; 1814 unsigned long now;
1819 unsigned long time_to_check;
1820 int i; 1815 int i;
1821 1816
1822 pr_debug("lec_arp_check_expire %p\n", priv); 1817 pr_debug("%p\n", priv);
1823 now = jiffies; 1818 now = jiffies;
1824restart: 1819restart:
1825 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1820 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1826 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1821 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1827 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { 1822 hlist_for_each_entry_safe(entry, node, next,
1828 if ((entry->flags) & LEC_REMOTE_FLAG && 1823 &priv->lec_arp_tables[i], next) {
1829 priv->topology_change) 1824 if (__lec_arp_check_expire(entry, now, priv)) {
1830 time_to_check = priv->forward_delay_time; 1825 struct sk_buff *skb;
1831 else 1826 struct atm_vcc *vcc = entry->vcc;
1832 time_to_check = priv->aging_time; 1827
1833 1828 spin_unlock_irqrestore(&priv->lec_arp_lock,
1834 pr_debug("About to expire: %lx - %lx > %lx\n", 1829 flags);
1835 now, entry->last_used, time_to_check); 1830 while ((skb = skb_dequeue(&entry->tx_wait)))
1836 if (time_after(now, entry->last_used + time_to_check) 1831 lec_send(vcc, skb);
1837 && !(entry->flags & LEC_PERMANENT_FLAG) 1832 entry->last_used = jiffies;
1838 && !(entry->mac_addr[0] & 0x01)) { /* LANE2: 7.1.20 */ 1833 entry->status = ESI_FORWARD_DIRECT;
1839 /* Remove entry */
1840 pr_debug("LEC:Entry timed out\n");
1841 lec_arp_remove(priv, entry);
1842 lec_arp_put(entry); 1834 lec_arp_put(entry);
1843 } else { 1835
1844 /* Something else */ 1836 goto restart;
1845 if ((entry->status == ESI_VC_PENDING ||
1846 entry->status == ESI_ARP_PENDING)
1847 && time_after_eq(now,
1848 entry->timestamp +
1849 priv->
1850 max_unknown_frame_time)) {
1851 entry->timestamp = jiffies;
1852 entry->packets_flooded = 0;
1853 if (entry->status == ESI_VC_PENDING)
1854 send_to_lecd(priv, l_svc_setup,
1855 entry->mac_addr,
1856 entry->atm_addr,
1857 NULL);
1858 }
1859 if (entry->status == ESI_FLUSH_PENDING
1860 &&
1861 time_after_eq(now, entry->timestamp +
1862 priv->path_switching_delay)) {
1863 struct sk_buff *skb;
1864 struct atm_vcc *vcc = entry->vcc;
1865
1866 lec_arp_hold(entry);
1867 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
1868 while ((skb = skb_dequeue(&entry->tx_wait)) != NULL)
1869 lec_send(vcc, skb);
1870 entry->last_used = jiffies;
1871 entry->status = ESI_FORWARD_DIRECT;
1872 lec_arp_put(entry);
1873 goto restart;
1874 }
1875 } 1837 }
1876 } 1838 }
1877 } 1839 }
@@ -1885,7 +1847,8 @@ restart:
1885 * 1847 *
1886 */ 1848 */
1887static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv, 1849static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1888 const unsigned char *mac_to_find, int is_rdesc, 1850 const unsigned char *mac_to_find,
1851 int is_rdesc,
1889 struct lec_arp_table **ret_entry) 1852 struct lec_arp_table **ret_entry)
1890{ 1853{
1891 unsigned long flags; 1854 unsigned long flags;
@@ -1921,9 +1884,8 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1921 * If the LE_ARP cache entry is still pending, reset count to 0 1884 * If the LE_ARP cache entry is still pending, reset count to 0
1922 * so another LE_ARP request can be made for this frame. 1885 * so another LE_ARP request can be made for this frame.
1923 */ 1886 */
1924 if (entry->status == ESI_ARP_PENDING) { 1887 if (entry->status == ESI_ARP_PENDING)
1925 entry->no_tries = 0; 1888 entry->no_tries = 0;
1926 }
1927 /* 1889 /*
1928 * Data direct VC not yet set up, check to see if the unknown 1890 * Data direct VC not yet set up, check to see if the unknown
1929 * frame count is greater than the limit. If the limit has 1891 * frame count is greater than the limit. If the limit has
@@ -1934,7 +1896,7 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1934 entry->packets_flooded < 1896 entry->packets_flooded <
1935 priv->maximum_unknown_frame_count) { 1897 priv->maximum_unknown_frame_count) {
1936 entry->packets_flooded++; 1898 entry->packets_flooded++;
1937 pr_debug("LEC_ARP: Flooding..\n"); 1899 pr_debug("Flooding..\n");
1938 found = priv->mcast_vcc; 1900 found = priv->mcast_vcc;
1939 goto out; 1901 goto out;
1940 } 1902 }
@@ -1945,13 +1907,13 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1945 */ 1907 */
1946 lec_arp_hold(entry); 1908 lec_arp_hold(entry);
1947 *ret_entry = entry; 1909 *ret_entry = entry;
1948 pr_debug("lec: entry->status %d entry->vcc %p\n", entry->status, 1910 pr_debug("entry->status %d entry->vcc %p\n", entry->status,
1949 entry->vcc); 1911 entry->vcc);
1950 found = NULL; 1912 found = NULL;
1951 } else { 1913 } else {
1952 /* No matching entry was found */ 1914 /* No matching entry was found */
1953 entry = make_entry(priv, mac_to_find); 1915 entry = make_entry(priv, mac_to_find);
1954 pr_debug("LEC_ARP: Making entry\n"); 1916 pr_debug("Making entry\n");
1955 if (!entry) { 1917 if (!entry) {
1956 found = priv->mcast_vcc; 1918 found = priv->mcast_vcc;
1957 goto out; 1919 goto out;
@@ -1988,13 +1950,14 @@ lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr,
1988 struct lec_arp_table *entry; 1950 struct lec_arp_table *entry;
1989 int i; 1951 int i;
1990 1952
1991 pr_debug("lec_addr_delete\n"); 1953 pr_debug("\n");
1992 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1954 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1993 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1955 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1994 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { 1956 hlist_for_each_entry_safe(entry, node, next,
1995 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) 1957 &priv->lec_arp_tables[i], next) {
1996 && (permanent || 1958 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) &&
1997 !(entry->flags & LEC_PERMANENT_FLAG))) { 1959 (permanent ||
1960 !(entry->flags & LEC_PERMANENT_FLAG))) {
1998 lec_arp_remove(priv, entry); 1961 lec_arp_remove(priv, entry);
1999 lec_arp_put(entry); 1962 lec_arp_put(entry);
2000 } 1963 }
@@ -2019,10 +1982,8 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
2019 struct lec_arp_table *entry, *tmp; 1982 struct lec_arp_table *entry, *tmp;
2020 int i; 1983 int i;
2021 1984
2022 pr_debug("lec:%s", (targetless_le_arp) ? "targetless " : " "); 1985 pr_debug("%smac:%pM\n",
2023 pr_debug("lec_arp_update mac:%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", 1986 (targetless_le_arp) ? "targetless " : "", mac_addr);
2024 mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
2025 mac_addr[4], mac_addr[5]);
2026 1987
2027 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1988 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2028 entry = lec_arp_find(priv, mac_addr); 1989 entry = lec_arp_find(priv, mac_addr);
@@ -2032,7 +1993,8 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
2032 * we have no entry in the cache. 7.1.30 1993 * we have no entry in the cache. 7.1.30
2033 */ 1994 */
2034 if (!hlist_empty(&priv->lec_arp_empty_ones)) { 1995 if (!hlist_empty(&priv->lec_arp_empty_ones)) {
2035 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { 1996 hlist_for_each_entry_safe(entry, node, next,
1997 &priv->lec_arp_empty_ones, next) {
2036 if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) { 1998 if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) {
2037 hlist_del(&entry->next); 1999 hlist_del(&entry->next);
2038 del_timer(&entry->timer); 2000 del_timer(&entry->timer);
@@ -2076,7 +2038,8 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
2076 memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN); 2038 memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN);
2077 del_timer(&entry->timer); 2039 del_timer(&entry->timer);
2078 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2040 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2079 hlist_for_each_entry(tmp, node, &priv->lec_arp_tables[i], next) { 2041 hlist_for_each_entry(tmp, node,
2042 &priv->lec_arp_tables[i], next) {
2080 if (entry != tmp && 2043 if (entry != tmp &&
2081 !memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) { 2044 !memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) {
2082 /* Vcc to this host exists */ 2045 /* Vcc to this host exists */
@@ -2121,14 +2084,13 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
2121 int i, found_entry = 0; 2084 int i, found_entry = 0;
2122 2085
2123 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2086 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2087 /* Vcc for Multicast Forward. No timer, LANEv2 7.1.20 and 2.3.5.3 */
2124 if (ioc_data->receive == 2) { 2088 if (ioc_data->receive == 2) {
2125 /* Vcc for Multicast Forward. No timer, LANEv2 7.1.20 and 2.3.5.3 */
2126
2127 pr_debug("LEC_ARP: Attaching mcast forward\n"); 2089 pr_debug("LEC_ARP: Attaching mcast forward\n");
2128#if 0 2090#if 0
2129 entry = lec_arp_find(priv, bus_mac); 2091 entry = lec_arp_find(priv, bus_mac);
2130 if (!entry) { 2092 if (!entry) {
2131 printk("LEC_ARP: Multicast entry not found!\n"); 2093 pr_info("LEC_ARP: Multicast entry not found!\n");
2132 goto out; 2094 goto out;
2133 } 2095 }
2134 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); 2096 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
@@ -2149,19 +2111,17 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
2149 * Vcc which we don't want to make default vcc, 2111 * Vcc which we don't want to make default vcc,
2150 * attach it anyway. 2112 * attach it anyway.
2151 */ 2113 */
2152 pr_debug 2114 pr_debug("LEC_ARP:Attaching data direct, not default: %2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n",
2153 ("LEC_ARP:Attaching data direct, not default: " 2115 ioc_data->atm_addr[0], ioc_data->atm_addr[1],
2154 "%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", 2116 ioc_data->atm_addr[2], ioc_data->atm_addr[3],
2155 ioc_data->atm_addr[0], ioc_data->atm_addr[1], 2117 ioc_data->atm_addr[4], ioc_data->atm_addr[5],
2156 ioc_data->atm_addr[2], ioc_data->atm_addr[3], 2118 ioc_data->atm_addr[6], ioc_data->atm_addr[7],
2157 ioc_data->atm_addr[4], ioc_data->atm_addr[5], 2119 ioc_data->atm_addr[8], ioc_data->atm_addr[9],
2158 ioc_data->atm_addr[6], ioc_data->atm_addr[7], 2120 ioc_data->atm_addr[10], ioc_data->atm_addr[11],
2159 ioc_data->atm_addr[8], ioc_data->atm_addr[9], 2121 ioc_data->atm_addr[12], ioc_data->atm_addr[13],
2160 ioc_data->atm_addr[10], ioc_data->atm_addr[11], 2122 ioc_data->atm_addr[14], ioc_data->atm_addr[15],
2161 ioc_data->atm_addr[12], ioc_data->atm_addr[13], 2123 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
2162 ioc_data->atm_addr[14], ioc_data->atm_addr[15], 2124 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
2163 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
2164 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
2165 entry = make_entry(priv, bus_mac); 2125 entry = make_entry(priv, bus_mac);
2166 if (entry == NULL) 2126 if (entry == NULL)
2167 goto out; 2127 goto out;
@@ -2177,29 +2137,28 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
2177 dump_arp_table(priv); 2137 dump_arp_table(priv);
2178 goto out; 2138 goto out;
2179 } 2139 }
2180 pr_debug 2140 pr_debug("LEC_ARP:Attaching data direct, default: %2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n",
2181 ("LEC_ARP:Attaching data direct, default: " 2141 ioc_data->atm_addr[0], ioc_data->atm_addr[1],
2182 "%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", 2142 ioc_data->atm_addr[2], ioc_data->atm_addr[3],
2183 ioc_data->atm_addr[0], ioc_data->atm_addr[1], 2143 ioc_data->atm_addr[4], ioc_data->atm_addr[5],
2184 ioc_data->atm_addr[2], ioc_data->atm_addr[3], 2144 ioc_data->atm_addr[6], ioc_data->atm_addr[7],
2185 ioc_data->atm_addr[4], ioc_data->atm_addr[5], 2145 ioc_data->atm_addr[8], ioc_data->atm_addr[9],
2186 ioc_data->atm_addr[6], ioc_data->atm_addr[7], 2146 ioc_data->atm_addr[10], ioc_data->atm_addr[11],
2187 ioc_data->atm_addr[8], ioc_data->atm_addr[9], 2147 ioc_data->atm_addr[12], ioc_data->atm_addr[13],
2188 ioc_data->atm_addr[10], ioc_data->atm_addr[11], 2148 ioc_data->atm_addr[14], ioc_data->atm_addr[15],
2189 ioc_data->atm_addr[12], ioc_data->atm_addr[13], 2149 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
2190 ioc_data->atm_addr[14], ioc_data->atm_addr[15], 2150 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
2191 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
2192 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
2193 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2151 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2194 hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { 2152 hlist_for_each_entry(entry, node,
2153 &priv->lec_arp_tables[i], next) {
2195 if (memcmp 2154 if (memcmp
2196 (ioc_data->atm_addr, entry->atm_addr, 2155 (ioc_data->atm_addr, entry->atm_addr,
2197 ATM_ESA_LEN) == 0) { 2156 ATM_ESA_LEN) == 0) {
2198 pr_debug("LEC_ARP: Attaching data direct\n"); 2157 pr_debug("LEC_ARP: Attaching data direct\n");
2199 pr_debug("Currently -> Vcc: %d, Rvcc:%d\n", 2158 pr_debug("Currently -> Vcc: %d, Rvcc:%d\n",
2200 entry->vcc ? entry->vcc->vci : 0, 2159 entry->vcc ? entry->vcc->vci : 0,
2201 entry->recv_vcc ? entry->recv_vcc-> 2160 entry->recv_vcc ? entry->recv_vcc->
2202 vci : 0); 2161 vci : 0);
2203 found_entry = 1; 2162 found_entry = 1;
2204 del_timer(&entry->timer); 2163 del_timer(&entry->timer);
2205 entry->vcc = vcc; 2164 entry->vcc = vcc;
@@ -2271,19 +2230,21 @@ static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
2271 struct lec_arp_table *entry; 2230 struct lec_arp_table *entry;
2272 int i; 2231 int i;
2273 2232
2274 pr_debug("LEC:lec_flush_complete %lx\n", tran_id); 2233 pr_debug("%lx\n", tran_id);
2275restart: 2234restart:
2276 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2235 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2277 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2236 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2278 hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { 2237 hlist_for_each_entry(entry, node,
2279 if (entry->flush_tran_id == tran_id 2238 &priv->lec_arp_tables[i], next) {
2280 && entry->status == ESI_FLUSH_PENDING) { 2239 if (entry->flush_tran_id == tran_id &&
2240 entry->status == ESI_FLUSH_PENDING) {
2281 struct sk_buff *skb; 2241 struct sk_buff *skb;
2282 struct atm_vcc *vcc = entry->vcc; 2242 struct atm_vcc *vcc = entry->vcc;
2283 2243
2284 lec_arp_hold(entry); 2244 lec_arp_hold(entry);
2285 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 2245 spin_unlock_irqrestore(&priv->lec_arp_lock,
2286 while ((skb = skb_dequeue(&entry->tx_wait)) != NULL) 2246 flags);
2247 while ((skb = skb_dequeue(&entry->tx_wait)))
2287 lec_send(vcc, skb); 2248 lec_send(vcc, skb);
2288 entry->last_used = jiffies; 2249 entry->last_used = jiffies;
2289 entry->status = ESI_FORWARD_DIRECT; 2250 entry->status = ESI_FORWARD_DIRECT;
@@ -2308,11 +2269,12 @@ lec_set_flush_tran_id(struct lec_priv *priv,
2308 2269
2309 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2270 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2310 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) 2271 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
2311 hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { 2272 hlist_for_each_entry(entry, node,
2273 &priv->lec_arp_tables[i], next) {
2312 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) { 2274 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) {
2313 entry->flush_tran_id = tran_id; 2275 entry->flush_tran_id = tran_id;
2314 pr_debug("Set flush transaction id to %lx for %p\n", 2276 pr_debug("Set flush transaction id to %lx for %p\n",
2315 tran_id, entry); 2277 tran_id, entry);
2316 } 2278 }
2317 } 2279 }
2318 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 2280 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
@@ -2328,7 +2290,8 @@ static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc)
2328 struct lec_vcc_priv *vpriv; 2290 struct lec_vcc_priv *vpriv;
2329 int err = 0; 2291 int err = 0;
2330 2292
2331 if (!(vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL))) 2293 vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
2294 if (!vpriv)
2332 return -ENOMEM; 2295 return -ENOMEM;
2333 vpriv->xoff = 0; 2296 vpriv->xoff = 0;
2334 vpriv->old_pop = vcc->pop; 2297 vpriv->old_pop = vcc->pop;
@@ -2368,18 +2331,19 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
2368 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2331 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2369 2332
2370 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2333 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2371 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { 2334 hlist_for_each_entry_safe(entry, node, next,
2335 &priv->lec_arp_tables[i], next) {
2372 if (vcc == entry->vcc) { 2336 if (vcc == entry->vcc) {
2373 lec_arp_remove(priv, entry); 2337 lec_arp_remove(priv, entry);
2374 lec_arp_put(entry); 2338 lec_arp_put(entry);
2375 if (priv->mcast_vcc == vcc) { 2339 if (priv->mcast_vcc == vcc)
2376 priv->mcast_vcc = NULL; 2340 priv->mcast_vcc = NULL;
2377 }
2378 } 2341 }
2379 } 2342 }
2380 } 2343 }
2381 2344
2382 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { 2345 hlist_for_each_entry_safe(entry, node, next,
2346 &priv->lec_arp_empty_ones, next) {
2383 if (entry->vcc == vcc) { 2347 if (entry->vcc == vcc) {
2384 lec_arp_clear_vccs(entry); 2348 lec_arp_clear_vccs(entry);
2385 del_timer(&entry->timer); 2349 del_timer(&entry->timer);
@@ -2388,7 +2352,8 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
2388 } 2352 }
2389 } 2353 }
2390 2354
2391 hlist_for_each_entry_safe(entry, node, next, &priv->lec_no_forward, next) { 2355 hlist_for_each_entry_safe(entry, node, next,
2356 &priv->lec_no_forward, next) {
2392 if (entry->recv_vcc == vcc) { 2357 if (entry->recv_vcc == vcc) {
2393 lec_arp_clear_vccs(entry); 2358 lec_arp_clear_vccs(entry);
2394 del_timer(&entry->timer); 2359 del_timer(&entry->timer);
@@ -2429,14 +2394,16 @@ lec_arp_check_empties(struct lec_priv *priv,
2429 src = hdr->h_source; 2394 src = hdr->h_source;
2430 2395
2431 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2396 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2432 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { 2397 hlist_for_each_entry_safe(entry, node, next,
2398 &priv->lec_arp_empty_ones, next) {
2433 if (vcc == entry->vcc) { 2399 if (vcc == entry->vcc) {
2434 del_timer(&entry->timer); 2400 del_timer(&entry->timer);
2435 memcpy(entry->mac_addr, src, ETH_ALEN); 2401 memcpy(entry->mac_addr, src, ETH_ALEN);
2436 entry->status = ESI_FORWARD_DIRECT; 2402 entry->status = ESI_FORWARD_DIRECT;
2437 entry->last_used = jiffies; 2403 entry->last_used = jiffies;
2438 /* We might have got an entry */ 2404 /* We might have got an entry */
2439 if ((tmp = lec_arp_find(priv, src))) { 2405 tmp = lec_arp_find(priv, src);
2406 if (tmp) {
2440 lec_arp_remove(priv, tmp); 2407 lec_arp_remove(priv, tmp);
2441 lec_arp_put(tmp); 2408 lec_arp_put(tmp);
2442 } 2409 }
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 38a6cb0863f0..a6521c8aa88b 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -1,3 +1,5 @@
1#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
2
1#include <linux/kernel.h> 3#include <linux/kernel.h>
2#include <linux/string.h> 4#include <linux/string.h>
3#include <linux/timer.h> 5#include <linux/timer.h>
@@ -13,8 +15,8 @@
13#include <net/sock.h> 15#include <net/sock.h>
14#include <linux/skbuff.h> 16#include <linux/skbuff.h>
15#include <linux/ip.h> 17#include <linux/ip.h>
18#include <linux/uaccess.h>
16#include <asm/byteorder.h> 19#include <asm/byteorder.h>
17#include <asm/uaccess.h>
18#include <net/checksum.h> /* for ip_fast_csum() */ 20#include <net/checksum.h> /* for ip_fast_csum() */
19#include <net/arp.h> 21#include <net/arp.h>
20#include <net/dst.h> 22#include <net/dst.h>
@@ -36,31 +38,47 @@
36 */ 38 */
37 39
38#if 0 40#if 0
39#define dprintk printk /* debug */ 41#define dprintk(format, args...) \
42 printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args)
43#define dprintk_cont(format, args...) printk(KERN_CONT format, ##args)
40#else 44#else
41#define dprintk(format,args...) 45#define dprintk(format, args...) \
46 do { if (0) \
47 printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args);\
48 } while (0)
49#define dprintk_cont(format, args...) \
50 do { if (0) printk(KERN_CONT format, ##args); } while (0)
42#endif 51#endif
43 52
44#if 0 53#if 0
45#define ddprintk printk /* more debug */ 54#define ddprintk(format, args...) \
55 printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args)
56#define ddprintk_cont(format, args...) printk(KERN_CONT format, ##args)
46#else 57#else
47#define ddprintk(format,args...) 58#define ddprintk(format, args...) \
59 do { if (0) \
60 printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args);\
61 } while (0)
62#define ddprintk_cont(format, args...) \
63 do { if (0) printk(KERN_CONT format, ##args); } while (0)
48#endif 64#endif
49 65
50
51
52#define MPOA_TAG_LEN 4 66#define MPOA_TAG_LEN 4
53 67
54/* mpc_daemon -> kernel */ 68/* mpc_daemon -> kernel */
55static void MPOA_trigger_rcvd (struct k_message *msg, struct mpoa_client *mpc); 69static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc);
56static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc); 70static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc);
57static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc); 71static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc);
58static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc); 72static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc);
59static void mps_death(struct k_message *msg, struct mpoa_client *mpc); 73static void mps_death(struct k_message *msg, struct mpoa_client *mpc);
60static void clean_up(struct k_message *msg, struct mpoa_client *mpc, int action); 74static void clean_up(struct k_message *msg, struct mpoa_client *mpc,
61static void MPOA_cache_impos_rcvd(struct k_message *msg, struct mpoa_client *mpc); 75 int action);
62static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, struct mpoa_client *mpc); 76static void MPOA_cache_impos_rcvd(struct k_message *msg,
63static void set_mps_mac_addr_rcvd(struct k_message *mesg, struct mpoa_client *mpc); 77 struct mpoa_client *mpc);
78static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg,
79 struct mpoa_client *mpc);
80static void set_mps_mac_addr_rcvd(struct k_message *mesg,
81 struct mpoa_client *mpc);
64 82
65static const uint8_t *copy_macs(struct mpoa_client *mpc, 83static const uint8_t *copy_macs(struct mpoa_client *mpc,
66 const uint8_t *router_mac, 84 const uint8_t *router_mac,
@@ -74,10 +92,11 @@ static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb);
74 92
75static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb); 93static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb);
76static netdev_tx_t mpc_send_packet(struct sk_buff *skb, 94static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
77 struct net_device *dev); 95 struct net_device *dev);
78static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned long event, void *dev); 96static int mpoa_event_listener(struct notifier_block *mpoa_notifier,
97 unsigned long event, void *dev);
79static void mpc_timer_refresh(void); 98static void mpc_timer_refresh(void);
80static void mpc_cache_check( unsigned long checking_time ); 99static void mpc_cache_check(unsigned long checking_time);
81 100
82static struct llc_snap_hdr llc_snap_mpoa_ctrl = { 101static struct llc_snap_hdr llc_snap_mpoa_ctrl = {
83 0xaa, 0xaa, 0x03, 102 0xaa, 0xaa, 0x03,
@@ -167,7 +186,7 @@ struct atm_mpoa_qos *atm_mpoa_add_qos(__be32 dst_ip, struct atm_qos *qos)
167 186
168 entry = kmalloc(sizeof(struct atm_mpoa_qos), GFP_KERNEL); 187 entry = kmalloc(sizeof(struct atm_mpoa_qos), GFP_KERNEL);
169 if (entry == NULL) { 188 if (entry == NULL) {
170 printk("mpoa: atm_mpoa_add_qos: out of memory\n"); 189 pr_info("mpoa: out of memory\n");
171 return entry; 190 return entry;
172 } 191 }
173 192
@@ -185,10 +204,9 @@ struct atm_mpoa_qos *atm_mpoa_search_qos(__be32 dst_ip)
185 struct atm_mpoa_qos *qos; 204 struct atm_mpoa_qos *qos;
186 205
187 qos = qos_head; 206 qos = qos_head;
188 while( qos != NULL ){ 207 while (qos) {
189 if(qos->ipaddr == dst_ip) { 208 if (qos->ipaddr == dst_ip)
190 break; 209 break;
191 }
192 qos = qos->next; 210 qos = qos->next;
193 } 211 }
194 212
@@ -200,10 +218,10 @@ struct atm_mpoa_qos *atm_mpoa_search_qos(__be32 dst_ip)
200 */ 218 */
201int atm_mpoa_delete_qos(struct atm_mpoa_qos *entry) 219int atm_mpoa_delete_qos(struct atm_mpoa_qos *entry)
202{ 220{
203
204 struct atm_mpoa_qos *curr; 221 struct atm_mpoa_qos *curr;
205 222
206 if (entry == NULL) return 0; 223 if (entry == NULL)
224 return 0;
207 if (entry == qos_head) { 225 if (entry == qos_head) {
208 qos_head = qos_head->next; 226 qos_head = qos_head->next;
209 kfree(entry); 227 kfree(entry);
@@ -234,9 +252,17 @@ void atm_mpoa_disp_qos(struct seq_file *m)
234 252
235 while (qos != NULL) { 253 while (qos != NULL) {
236 seq_printf(m, "%pI4\n %-7d %-7d %-7d %-7d %-7d\n %-7d %-7d %-7d %-7d %-7d\n", 254 seq_printf(m, "%pI4\n %-7d %-7d %-7d %-7d %-7d\n %-7d %-7d %-7d %-7d %-7d\n",
237 &qos->ipaddr, 255 &qos->ipaddr,
238 qos->qos.txtp.max_pcr, qos->qos.txtp.pcr, qos->qos.txtp.min_pcr, qos->qos.txtp.max_cdv, qos->qos.txtp.max_sdu, 256 qos->qos.txtp.max_pcr,
239 qos->qos.rxtp.max_pcr, qos->qos.rxtp.pcr, qos->qos.rxtp.min_pcr, qos->qos.rxtp.max_cdv, qos->qos.rxtp.max_sdu); 257 qos->qos.txtp.pcr,
258 qos->qos.txtp.min_pcr,
259 qos->qos.txtp.max_cdv,
260 qos->qos.txtp.max_sdu,
261 qos->qos.rxtp.max_pcr,
262 qos->qos.rxtp.pcr,
263 qos->qos.rxtp.min_pcr,
264 qos->qos.rxtp.max_cdv,
265 qos->qos.rxtp.max_sdu);
240 qos = qos->next; 266 qos = qos->next;
241 } 267 }
242} 268}
@@ -256,7 +282,7 @@ static struct mpoa_client *alloc_mpc(void)
256{ 282{
257 struct mpoa_client *mpc; 283 struct mpoa_client *mpc;
258 284
259 mpc = kzalloc(sizeof (struct mpoa_client), GFP_KERNEL); 285 mpc = kzalloc(sizeof(struct mpoa_client), GFP_KERNEL);
260 if (mpc == NULL) 286 if (mpc == NULL)
261 return NULL; 287 return NULL;
262 rwlock_init(&mpc->ingress_lock); 288 rwlock_init(&mpc->ingress_lock);
@@ -266,7 +292,7 @@ static struct mpoa_client *alloc_mpc(void)
266 292
267 mpc->parameters.mpc_p1 = MPC_P1; 293 mpc->parameters.mpc_p1 = MPC_P1;
268 mpc->parameters.mpc_p2 = MPC_P2; 294 mpc->parameters.mpc_p2 = MPC_P2;
269 memset(mpc->parameters.mpc_p3,0,sizeof(mpc->parameters.mpc_p3)); 295 memset(mpc->parameters.mpc_p3, 0, sizeof(mpc->parameters.mpc_p3));
270 mpc->parameters.mpc_p4 = MPC_P4; 296 mpc->parameters.mpc_p4 = MPC_P4;
271 mpc->parameters.mpc_p5 = MPC_P5; 297 mpc->parameters.mpc_p5 = MPC_P5;
272 mpc->parameters.mpc_p6 = MPC_P6; 298 mpc->parameters.mpc_p6 = MPC_P6;
@@ -286,9 +312,9 @@ static struct mpoa_client *alloc_mpc(void)
286static void start_mpc(struct mpoa_client *mpc, struct net_device *dev) 312static void start_mpc(struct mpoa_client *mpc, struct net_device *dev)
287{ 313{
288 314
289 dprintk("mpoa: (%s) start_mpc:\n", mpc->dev->name); 315 dprintk("(%s)\n", mpc->dev->name);
290 if (!dev->netdev_ops) 316 if (!dev->netdev_ops)
291 printk("mpoa: (%s) start_mpc not starting\n", dev->name); 317 pr_info("(%s) not starting\n", dev->name);
292 else { 318 else {
293 mpc->old_ops = dev->netdev_ops; 319 mpc->old_ops = dev->netdev_ops;
294 mpc->new_ops = *mpc->old_ops; 320 mpc->new_ops = *mpc->old_ops;
@@ -300,14 +326,14 @@ static void start_mpc(struct mpoa_client *mpc, struct net_device *dev)
300static void stop_mpc(struct mpoa_client *mpc) 326static void stop_mpc(struct mpoa_client *mpc)
301{ 327{
302 struct net_device *dev = mpc->dev; 328 struct net_device *dev = mpc->dev;
303 dprintk("mpoa: (%s) stop_mpc:", mpc->dev->name); 329 dprintk("(%s)", mpc->dev->name);
304 330
305 /* Lets not nullify lec device's dev->hard_start_xmit */ 331 /* Lets not nullify lec device's dev->hard_start_xmit */
306 if (dev->netdev_ops != &mpc->new_ops) { 332 if (dev->netdev_ops != &mpc->new_ops) {
307 dprintk(" mpc already stopped, not fatal\n"); 333 dprintk_cont(" mpc already stopped, not fatal\n");
308 return; 334 return;
309 } 335 }
310 dprintk("\n"); 336 dprintk_cont("\n");
311 337
312 dev->netdev_ops = mpc->old_ops; 338 dev->netdev_ops = mpc->old_ops;
313 mpc->old_ops = NULL; 339 mpc->old_ops = NULL;
@@ -319,25 +345,18 @@ static const char *mpoa_device_type_string(char type) __attribute__ ((unused));
319 345
320static const char *mpoa_device_type_string(char type) 346static const char *mpoa_device_type_string(char type)
321{ 347{
322 switch(type) { 348 switch (type) {
323 case NON_MPOA: 349 case NON_MPOA:
324 return "non-MPOA device"; 350 return "non-MPOA device";
325 break;
326 case MPS: 351 case MPS:
327 return "MPS"; 352 return "MPS";
328 break;
329 case MPC: 353 case MPC:
330 return "MPC"; 354 return "MPC";
331 break;
332 case MPS_AND_MPC: 355 case MPS_AND_MPC:
333 return "both MPS and MPC"; 356 return "both MPS and MPC";
334 break;
335 default:
336 return "unspecified (non-MPOA) device";
337 break;
338 } 357 }
339 358
340 return ""; /* not reached */ 359 return "unspecified (non-MPOA) device";
341} 360}
342 361
343/* 362/*
@@ -362,26 +381,28 @@ static void lane2_assoc_ind(struct net_device *dev, const u8 *mac_addr,
362 struct mpoa_client *mpc; 381 struct mpoa_client *mpc;
363 382
364 mpoa_device_type = number_of_mps_macs = 0; /* silence gcc */ 383 mpoa_device_type = number_of_mps_macs = 0; /* silence gcc */
365 dprintk("mpoa: (%s) lane2_assoc_ind: received TLV(s), ", dev->name); 384 dprintk("(%s) received TLV(s), ", dev->name);
366 dprintk("total length of all TLVs %d\n", sizeoftlvs); 385 dprintk("total length of all TLVs %d\n", sizeoftlvs);
367 mpc = find_mpc_by_lec(dev); /* Sampo-Fix: moved here from below */ 386 mpc = find_mpc_by_lec(dev); /* Sampo-Fix: moved here from below */
368 if (mpc == NULL) { 387 if (mpc == NULL) {
369 printk("mpoa: (%s) lane2_assoc_ind: no mpc\n", dev->name); 388 pr_info("(%s) no mpc\n", dev->name);
370 return; 389 return;
371 } 390 }
372 end_of_tlvs = tlvs + sizeoftlvs; 391 end_of_tlvs = tlvs + sizeoftlvs;
373 while (end_of_tlvs - tlvs >= 5) { 392 while (end_of_tlvs - tlvs >= 5) {
374 type = (tlvs[0] << 24) | (tlvs[1] << 16) | (tlvs[2] << 8) | tlvs[3]; 393 type = ((tlvs[0] << 24) | (tlvs[1] << 16) |
394 (tlvs[2] << 8) | tlvs[3]);
375 length = tlvs[4]; 395 length = tlvs[4];
376 tlvs += 5; 396 tlvs += 5;
377 dprintk(" type 0x%x length %02x\n", type, length); 397 dprintk(" type 0x%x length %02x\n", type, length);
378 if (tlvs + length > end_of_tlvs) { 398 if (tlvs + length > end_of_tlvs) {
379 printk("TLV value extends past its buffer, aborting parse\n"); 399 pr_info("TLV value extends past its buffer, aborting parse\n");
380 return; 400 return;
381 } 401 }
382 402
383 if (type == 0) { 403 if (type == 0) {
384 printk("mpoa: (%s) lane2_assoc_ind: TLV type was 0, returning\n", dev->name); 404 pr_info("mpoa: (%s) TLV type was 0, returning\n",
405 dev->name);
385 return; 406 return;
386 } 407 }
387 408
@@ -391,39 +412,48 @@ static void lane2_assoc_ind(struct net_device *dev, const u8 *mac_addr,
391 } 412 }
392 mpoa_device_type = *tlvs++; 413 mpoa_device_type = *tlvs++;
393 number_of_mps_macs = *tlvs++; 414 number_of_mps_macs = *tlvs++;
394 dprintk("mpoa: (%s) MPOA device type '%s', ", dev->name, mpoa_device_type_string(mpoa_device_type)); 415 dprintk("(%s) MPOA device type '%s', ",
416 dev->name, mpoa_device_type_string(mpoa_device_type));
395 if (mpoa_device_type == MPS_AND_MPC && 417 if (mpoa_device_type == MPS_AND_MPC &&
396 length < (42 + number_of_mps_macs*ETH_ALEN)) { /* :) */ 418 length < (42 + number_of_mps_macs*ETH_ALEN)) { /* :) */
397 printk("\nmpoa: (%s) lane2_assoc_ind: short MPOA Device Type TLV\n", 419 pr_info("(%s) short MPOA Device Type TLV\n",
398 dev->name); 420 dev->name);
399 continue; 421 continue;
400 } 422 }
401 if ((mpoa_device_type == MPS || mpoa_device_type == MPC) 423 if ((mpoa_device_type == MPS || mpoa_device_type == MPC) &&
402 && length < 22 + number_of_mps_macs*ETH_ALEN) { 424 length < 22 + number_of_mps_macs*ETH_ALEN) {
403 printk("\nmpoa: (%s) lane2_assoc_ind: short MPOA Device Type TLV\n", 425 pr_info("(%s) short MPOA Device Type TLV\n", dev->name);
404 dev->name);
405 continue; 426 continue;
406 } 427 }
407 if (mpoa_device_type != MPS && mpoa_device_type != MPS_AND_MPC) { 428 if (mpoa_device_type != MPS &&
408 dprintk("ignoring non-MPS device\n"); 429 mpoa_device_type != MPS_AND_MPC) {
409 if (mpoa_device_type == MPC) tlvs += 20; 430 dprintk("ignoring non-MPS device ");
431 if (mpoa_device_type == MPC)
432 tlvs += 20;
410 continue; /* we are only interested in MPSs */ 433 continue; /* we are only interested in MPSs */
411 } 434 }
412 if (number_of_mps_macs == 0 && mpoa_device_type == MPS_AND_MPC) { 435 if (number_of_mps_macs == 0 &&
413 printk("\nmpoa: (%s) lane2_assoc_ind: MPS_AND_MPC has zero MACs\n", dev->name); 436 mpoa_device_type == MPS_AND_MPC) {
437 pr_info("(%s) MPS_AND_MPC has zero MACs\n", dev->name);
414 continue; /* someone should read the spec */ 438 continue; /* someone should read the spec */
415 } 439 }
416 dprintk("this MPS has %d MAC addresses\n", number_of_mps_macs); 440 dprintk_cont("this MPS has %d MAC addresses\n",
441 number_of_mps_macs);
417 442
418 /* ok, now we can go and tell our daemon the control address of MPS */ 443 /*
444 * ok, now we can go and tell our daemon
445 * the control address of MPS
446 */
419 send_set_mps_ctrl_addr(tlvs, mpc); 447 send_set_mps_ctrl_addr(tlvs, mpc);
420 448
421 tlvs = copy_macs(mpc, mac_addr, tlvs, number_of_mps_macs, mpoa_device_type); 449 tlvs = copy_macs(mpc, mac_addr, tlvs,
422 if (tlvs == NULL) return; 450 number_of_mps_macs, mpoa_device_type);
451 if (tlvs == NULL)
452 return;
423 } 453 }
424 if (end_of_tlvs - tlvs != 0) 454 if (end_of_tlvs - tlvs != 0)
425 printk("mpoa: (%s) lane2_assoc_ind: ignoring %Zd bytes of trailing TLV carbage\n", 455 pr_info("(%s) ignoring %Zd bytes of trailing TLV garbage\n",
426 dev->name, end_of_tlvs - tlvs); 456 dev->name, end_of_tlvs - tlvs);
427 return; 457 return;
428} 458}
429 459
@@ -441,11 +471,12 @@ static const uint8_t *copy_macs(struct mpoa_client *mpc,
441 num_macs = (mps_macs > 1) ? mps_macs : 1; 471 num_macs = (mps_macs > 1) ? mps_macs : 1;
442 472
443 if (mpc->number_of_mps_macs != num_macs) { /* need to reallocate? */ 473 if (mpc->number_of_mps_macs != num_macs) { /* need to reallocate? */
444 if (mpc->number_of_mps_macs != 0) kfree(mpc->mps_macs); 474 if (mpc->number_of_mps_macs != 0)
475 kfree(mpc->mps_macs);
445 mpc->number_of_mps_macs = 0; 476 mpc->number_of_mps_macs = 0;
446 mpc->mps_macs = kmalloc(num_macs*ETH_ALEN, GFP_KERNEL); 477 mpc->mps_macs = kmalloc(num_macs * ETH_ALEN, GFP_KERNEL);
447 if (mpc->mps_macs == NULL) { 478 if (mpc->mps_macs == NULL) {
448 printk("mpoa: (%s) copy_macs: out of mem\n", mpc->dev->name); 479 pr_info("(%s) out of mem\n", mpc->dev->name);
449 return NULL; 480 return NULL;
450 } 481 }
451 } 482 }
@@ -478,24 +509,30 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
478 iph = (struct iphdr *)buff; 509 iph = (struct iphdr *)buff;
479 ipaddr = iph->daddr; 510 ipaddr = iph->daddr;
480 511
481 ddprintk("mpoa: (%s) send_via_shortcut: ipaddr 0x%x\n", mpc->dev->name, ipaddr); 512 ddprintk("(%s) ipaddr 0x%x\n",
513 mpc->dev->name, ipaddr);
482 514
483 entry = mpc->in_ops->get(ipaddr, mpc); 515 entry = mpc->in_ops->get(ipaddr, mpc);
484 if (entry == NULL) { 516 if (entry == NULL) {
485 entry = mpc->in_ops->add_entry(ipaddr, mpc); 517 entry = mpc->in_ops->add_entry(ipaddr, mpc);
486 if (entry != NULL) mpc->in_ops->put(entry); 518 if (entry != NULL)
519 mpc->in_ops->put(entry);
487 return 1; 520 return 1;
488 } 521 }
489 if (mpc->in_ops->cache_hit(entry, mpc) != OPEN){ /* threshold not exceeded or VCC not ready */ 522 /* threshold not exceeded or VCC not ready */
490 ddprintk("mpoa: (%s) send_via_shortcut: cache_hit: returns != OPEN\n", mpc->dev->name); 523 if (mpc->in_ops->cache_hit(entry, mpc) != OPEN) {
524 ddprintk("(%s) cache_hit: returns != OPEN\n",
525 mpc->dev->name);
491 mpc->in_ops->put(entry); 526 mpc->in_ops->put(entry);
492 return 1; 527 return 1;
493 } 528 }
494 529
495 ddprintk("mpoa: (%s) send_via_shortcut: using shortcut\n", mpc->dev->name); 530 ddprintk("(%s) using shortcut\n",
531 mpc->dev->name);
496 /* MPOA spec A.1.4, MPOA client must decrement IP ttl at least by one */ 532 /* MPOA spec A.1.4, MPOA client must decrement IP ttl at least by one */
497 if (iph->ttl <= 1) { 533 if (iph->ttl <= 1) {
498 ddprintk("mpoa: (%s) send_via_shortcut: IP ttl = %u, using LANE\n", mpc->dev->name, iph->ttl); 534 ddprintk("(%s) IP ttl = %u, using LANE\n",
535 mpc->dev->name, iph->ttl);
499 mpc->in_ops->put(entry); 536 mpc->in_ops->put(entry);
500 return 1; 537 return 1;
501 } 538 }
@@ -504,15 +541,18 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
504 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); 541 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
505 542
506 if (entry->ctrl_info.tag != 0) { 543 if (entry->ctrl_info.tag != 0) {
507 ddprintk("mpoa: (%s) send_via_shortcut: adding tag 0x%x\n", mpc->dev->name, entry->ctrl_info.tag); 544 ddprintk("(%s) adding tag 0x%x\n",
545 mpc->dev->name, entry->ctrl_info.tag);
508 tagged_llc_snap_hdr.tag = entry->ctrl_info.tag; 546 tagged_llc_snap_hdr.tag = entry->ctrl_info.tag;
509 skb_pull(skb, ETH_HLEN); /* get rid of Eth header */ 547 skb_pull(skb, ETH_HLEN); /* get rid of Eth header */
510 skb_push(skb, sizeof(tagged_llc_snap_hdr)); /* add LLC/SNAP header */ 548 skb_push(skb, sizeof(tagged_llc_snap_hdr));
549 /* add LLC/SNAP header */
511 skb_copy_to_linear_data(skb, &tagged_llc_snap_hdr, 550 skb_copy_to_linear_data(skb, &tagged_llc_snap_hdr,
512 sizeof(tagged_llc_snap_hdr)); 551 sizeof(tagged_llc_snap_hdr));
513 } else { 552 } else {
514 skb_pull(skb, ETH_HLEN); /* get rid of Eth header */ 553 skb_pull(skb, ETH_HLEN); /* get rid of Eth header */
515 skb_push(skb, sizeof(struct llc_snap_hdr)); /* add LLC/SNAP header + tag */ 554 skb_push(skb, sizeof(struct llc_snap_hdr));
555 /* add LLC/SNAP header + tag */
516 skb_copy_to_linear_data(skb, &llc_snap_mpoa_data, 556 skb_copy_to_linear_data(skb, &llc_snap_mpoa_data,
517 sizeof(struct llc_snap_hdr)); 557 sizeof(struct llc_snap_hdr));
518 } 558 }
@@ -537,8 +577,8 @@ static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
537 int i = 0; 577 int i = 0;
538 578
539 mpc = find_mpc_by_lec(dev); /* this should NEVER fail */ 579 mpc = find_mpc_by_lec(dev); /* this should NEVER fail */
540 if(mpc == NULL) { 580 if (mpc == NULL) {
541 printk("mpoa: (%s) mpc_send_packet: no MPC found\n", dev->name); 581 pr_info("(%s) no MPC found\n", dev->name);
542 goto non_ip; 582 goto non_ip;
543 } 583 }
544 584
@@ -554,14 +594,15 @@ static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
554 goto non_ip; 594 goto non_ip;
555 595
556 while (i < mpc->number_of_mps_macs) { 596 while (i < mpc->number_of_mps_macs) {
557 if (!compare_ether_addr(eth->h_dest, (mpc->mps_macs + i*ETH_ALEN))) 597 if (!compare_ether_addr(eth->h_dest,
558 if ( send_via_shortcut(skb, mpc) == 0 ) /* try shortcut */ 598 (mpc->mps_macs + i*ETH_ALEN)))
559 return NETDEV_TX_OK; /* success! */ 599 if (send_via_shortcut(skb, mpc) == 0) /* try shortcut */
600 return NETDEV_TX_OK;
560 i++; 601 i++;
561 } 602 }
562 603
563 non_ip: 604non_ip:
564 return mpc->old_ops->ndo_start_xmit(skb,dev); 605 return mpc->old_ops->ndo_start_xmit(skb, dev);
565} 606}
566 607
567static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg) 608static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
@@ -574,7 +615,8 @@ static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
574 615
575 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmmpc_ioc)); 616 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmmpc_ioc));
576 if (bytes_left != 0) { 617 if (bytes_left != 0) {
577 printk("mpoa: mpc_vcc_attach: Short read (missed %d bytes) from userland\n", bytes_left); 618 pr_info("mpoa:Short read (missed %d bytes) from userland\n",
619 bytes_left);
578 return -EFAULT; 620 return -EFAULT;
579 } 621 }
580 ipaddr = ioc_data.ipaddr; 622 ipaddr = ioc_data.ipaddr;
@@ -587,18 +629,20 @@ static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
587 629
588 if (ioc_data.type == MPC_SOCKET_INGRESS) { 630 if (ioc_data.type == MPC_SOCKET_INGRESS) {
589 in_entry = mpc->in_ops->get(ipaddr, mpc); 631 in_entry = mpc->in_ops->get(ipaddr, mpc);
590 if (in_entry == NULL || in_entry->entry_state < INGRESS_RESOLVED) { 632 if (in_entry == NULL ||
591 printk("mpoa: (%s) mpc_vcc_attach: did not find RESOLVED entry from ingress cache\n", 633 in_entry->entry_state < INGRESS_RESOLVED) {
634 pr_info("(%s) did not find RESOLVED entry from ingress cache\n",
592 mpc->dev->name); 635 mpc->dev->name);
593 if (in_entry != NULL) mpc->in_ops->put(in_entry); 636 if (in_entry != NULL)
637 mpc->in_ops->put(in_entry);
594 return -EINVAL; 638 return -EINVAL;
595 } 639 }
596 printk("mpoa: (%s) mpc_vcc_attach: attaching ingress SVC, entry = %pI4\n", 640 pr_info("(%s) attaching ingress SVC, entry = %pI4\n",
597 mpc->dev->name, &in_entry->ctrl_info.in_dst_ip); 641 mpc->dev->name, &in_entry->ctrl_info.in_dst_ip);
598 in_entry->shortcut = vcc; 642 in_entry->shortcut = vcc;
599 mpc->in_ops->put(in_entry); 643 mpc->in_ops->put(in_entry);
600 } else { 644 } else {
601 printk("mpoa: (%s) mpc_vcc_attach: attaching egress SVC\n", mpc->dev->name); 645 pr_info("(%s) attaching egress SVC\n", mpc->dev->name);
602 } 646 }
603 647
604 vcc->proto_data = mpc->dev; 648 vcc->proto_data = mpc->dev;
@@ -618,27 +662,27 @@ static void mpc_vcc_close(struct atm_vcc *vcc, struct net_device *dev)
618 662
619 mpc = find_mpc_by_lec(dev); 663 mpc = find_mpc_by_lec(dev);
620 if (mpc == NULL) { 664 if (mpc == NULL) {
621 printk("mpoa: (%s) mpc_vcc_close: close for unknown MPC\n", dev->name); 665 pr_info("(%s) close for unknown MPC\n", dev->name);
622 return; 666 return;
623 } 667 }
624 668
625 dprintk("mpoa: (%s) mpc_vcc_close:\n", dev->name); 669 dprintk("(%s)\n", dev->name);
626 in_entry = mpc->in_ops->get_by_vcc(vcc, mpc); 670 in_entry = mpc->in_ops->get_by_vcc(vcc, mpc);
627 if (in_entry) { 671 if (in_entry) {
628 dprintk("mpoa: (%s) mpc_vcc_close: ingress SVC closed ip = %pI4\n", 672 dprintk("(%s) ingress SVC closed ip = %pI4\n",
629 mpc->dev->name, &in_entry->ctrl_info.in_dst_ip); 673 mpc->dev->name, &in_entry->ctrl_info.in_dst_ip);
630 in_entry->shortcut = NULL; 674 in_entry->shortcut = NULL;
631 mpc->in_ops->put(in_entry); 675 mpc->in_ops->put(in_entry);
632 } 676 }
633 eg_entry = mpc->eg_ops->get_by_vcc(vcc, mpc); 677 eg_entry = mpc->eg_ops->get_by_vcc(vcc, mpc);
634 if (eg_entry) { 678 if (eg_entry) {
635 dprintk("mpoa: (%s) mpc_vcc_close: egress SVC closed\n", mpc->dev->name); 679 dprintk("(%s) egress SVC closed\n", mpc->dev->name);
636 eg_entry->shortcut = NULL; 680 eg_entry->shortcut = NULL;
637 mpc->eg_ops->put(eg_entry); 681 mpc->eg_ops->put(eg_entry);
638 } 682 }
639 683
640 if (in_entry == NULL && eg_entry == NULL) 684 if (in_entry == NULL && eg_entry == NULL)
641 dprintk("mpoa: (%s) mpc_vcc_close: unused vcc closed\n", dev->name); 685 dprintk("(%s) unused vcc closed\n", dev->name);
642 686
643 return; 687 return;
644} 688}
@@ -652,18 +696,19 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
652 __be32 tag; 696 __be32 tag;
653 char *tmp; 697 char *tmp;
654 698
655 ddprintk("mpoa: (%s) mpc_push:\n", dev->name); 699 ddprintk("(%s)\n", dev->name);
656 if (skb == NULL) { 700 if (skb == NULL) {
657 dprintk("mpoa: (%s) mpc_push: null skb, closing VCC\n", dev->name); 701 dprintk("(%s) null skb, closing VCC\n", dev->name);
658 mpc_vcc_close(vcc, dev); 702 mpc_vcc_close(vcc, dev);
659 return; 703 return;
660 } 704 }
661 705
662 skb->dev = dev; 706 skb->dev = dev;
663 if (memcmp(skb->data, &llc_snap_mpoa_ctrl, sizeof(struct llc_snap_hdr)) == 0) { 707 if (memcmp(skb->data, &llc_snap_mpoa_ctrl,
708 sizeof(struct llc_snap_hdr)) == 0) {
664 struct sock *sk = sk_atm(vcc); 709 struct sock *sk = sk_atm(vcc);
665 710
666 dprintk("mpoa: (%s) mpc_push: control packet arrived\n", dev->name); 711 dprintk("(%s) control packet arrived\n", dev->name);
667 /* Pass control packets to daemon */ 712 /* Pass control packets to daemon */
668 skb_queue_tail(&sk->sk_receive_queue, skb); 713 skb_queue_tail(&sk->sk_receive_queue, skb);
669 sk->sk_data_ready(sk, skb->len); 714 sk->sk_data_ready(sk, skb->len);
@@ -675,20 +720,22 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
675 720
676 mpc = find_mpc_by_lec(dev); 721 mpc = find_mpc_by_lec(dev);
677 if (mpc == NULL) { 722 if (mpc == NULL) {
678 printk("mpoa: (%s) mpc_push: unknown MPC\n", dev->name); 723 pr_info("(%s) unknown MPC\n", dev->name);
679 return; 724 return;
680 } 725 }
681 726
682 if (memcmp(skb->data, &llc_snap_mpoa_data_tagged, sizeof(struct llc_snap_hdr)) == 0) { /* MPOA tagged data */ 727 if (memcmp(skb->data, &llc_snap_mpoa_data_tagged,
683 ddprintk("mpoa: (%s) mpc_push: tagged data packet arrived\n", dev->name); 728 sizeof(struct llc_snap_hdr)) == 0) { /* MPOA tagged data */
729 ddprintk("(%s) tagged data packet arrived\n", dev->name);
684 730
685 } else if (memcmp(skb->data, &llc_snap_mpoa_data, sizeof(struct llc_snap_hdr)) == 0) { /* MPOA data */ 731 } else if (memcmp(skb->data, &llc_snap_mpoa_data,
686 printk("mpoa: (%s) mpc_push: non-tagged data packet arrived\n", dev->name); 732 sizeof(struct llc_snap_hdr)) == 0) { /* MPOA data */
687 printk(" mpc_push: non-tagged data unsupported, purging\n"); 733 pr_info("(%s) Unsupported non-tagged data packet arrived. Purging\n",
734 dev->name);
688 dev_kfree_skb_any(skb); 735 dev_kfree_skb_any(skb);
689 return; 736 return;
690 } else { 737 } else {
691 printk("mpoa: (%s) mpc_push: garbage arrived, purging\n", dev->name); 738 pr_info("(%s) garbage arrived, purging\n", dev->name);
692 dev_kfree_skb_any(skb); 739 dev_kfree_skb_any(skb);
693 return; 740 return;
694 } 741 }
@@ -698,8 +745,8 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
698 745
699 eg = mpc->eg_ops->get_by_tag(tag, mpc); 746 eg = mpc->eg_ops->get_by_tag(tag, mpc);
700 if (eg == NULL) { 747 if (eg == NULL) {
701 printk("mpoa: (%s) mpc_push: Didn't find egress cache entry, tag = %u\n", 748 pr_info("mpoa: (%s) Didn't find egress cache entry, tag = %u\n",
702 dev->name,tag); 749 dev->name, tag);
703 purge_egress_shortcut(vcc, NULL); 750 purge_egress_shortcut(vcc, NULL);
704 dev_kfree_skb_any(skb); 751 dev_kfree_skb_any(skb);
705 return; 752 return;
@@ -711,13 +758,15 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
711 */ 758 */
712 if (eg->shortcut == NULL) { 759 if (eg->shortcut == NULL) {
713 eg->shortcut = vcc; 760 eg->shortcut = vcc;
714 printk("mpoa: (%s) mpc_push: egress SVC in use\n", dev->name); 761 pr_info("(%s) egress SVC in use\n", dev->name);
715 } 762 }
716 763
717 skb_pull(skb, sizeof(struct llc_snap_hdr) + sizeof(tag)); /* get rid of LLC/SNAP header */ 764 skb_pull(skb, sizeof(struct llc_snap_hdr) + sizeof(tag));
718 new_skb = skb_realloc_headroom(skb, eg->ctrl_info.DH_length); /* LLC/SNAP is shorter than MAC header :( */ 765 /* get rid of LLC/SNAP header */
766 new_skb = skb_realloc_headroom(skb, eg->ctrl_info.DH_length);
767 /* LLC/SNAP is shorter than MAC header :( */
719 dev_kfree_skb_any(skb); 768 dev_kfree_skb_any(skb);
720 if (new_skb == NULL){ 769 if (new_skb == NULL) {
721 mpc->eg_ops->put(eg); 770 mpc->eg_ops->put(eg);
722 return; 771 return;
723 } 772 }
@@ -750,7 +799,7 @@ static struct atm_dev mpc_dev = {
750 /* members not explicitly initialised will be 0 */ 799 /* members not explicitly initialised will be 0 */
751}; 800};
752 801
753static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg) 802static int atm_mpoa_mpoad_attach(struct atm_vcc *vcc, int arg)
754{ 803{
755 struct mpoa_client *mpc; 804 struct mpoa_client *mpc;
756 struct lec_priv *priv; 805 struct lec_priv *priv;
@@ -770,15 +819,16 @@ static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg)
770 819
771 mpc = find_mpc_by_itfnum(arg); 820 mpc = find_mpc_by_itfnum(arg);
772 if (mpc == NULL) { 821 if (mpc == NULL) {
773 dprintk("mpoa: mpoad_attach: allocating new mpc for itf %d\n", arg); 822 dprintk("allocating new mpc for itf %d\n", arg);
774 mpc = alloc_mpc(); 823 mpc = alloc_mpc();
775 if (mpc == NULL) 824 if (mpc == NULL)
776 return -ENOMEM; 825 return -ENOMEM;
777 mpc->dev_num = arg; 826 mpc->dev_num = arg;
778 mpc->dev = find_lec_by_itfnum(arg); /* NULL if there was no lec */ 827 mpc->dev = find_lec_by_itfnum(arg);
828 /* NULL if there was no lec */
779 } 829 }
780 if (mpc->mpoad_vcc) { 830 if (mpc->mpoad_vcc) {
781 printk("mpoa: mpoad_attach: mpoad is already present for itf %d\n", arg); 831 pr_info("mpoad is already present for itf %d\n", arg);
782 return -EADDRINUSE; 832 return -EADDRINUSE;
783 } 833 }
784 834
@@ -794,8 +844,8 @@ static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg)
794 mpc->mpoad_vcc = vcc; 844 mpc->mpoad_vcc = vcc;
795 vcc->dev = &mpc_dev; 845 vcc->dev = &mpc_dev;
796 vcc_insert_socket(sk_atm(vcc)); 846 vcc_insert_socket(sk_atm(vcc));
797 set_bit(ATM_VF_META,&vcc->flags); 847 set_bit(ATM_VF_META, &vcc->flags);
798 set_bit(ATM_VF_READY,&vcc->flags); 848 set_bit(ATM_VF_READY, &vcc->flags);
799 849
800 if (mpc->dev) { 850 if (mpc->dev) {
801 char empty[ATM_ESA_LEN]; 851 char empty[ATM_ESA_LEN];
@@ -805,7 +855,7 @@ static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg)
805 /* set address if mpcd e.g. gets killed and restarted. 855 /* set address if mpcd e.g. gets killed and restarted.
806 * If we do not do it now we have to wait for the next LE_ARP 856 * If we do not do it now we have to wait for the next LE_ARP
807 */ 857 */
808 if ( memcmp(mpc->mps_ctrl_addr, empty, ATM_ESA_LEN) != 0 ) 858 if (memcmp(mpc->mps_ctrl_addr, empty, ATM_ESA_LEN) != 0)
809 send_set_mps_ctrl_addr(mpc->mps_ctrl_addr, mpc); 859 send_set_mps_ctrl_addr(mpc->mps_ctrl_addr, mpc);
810 } 860 }
811 861
@@ -817,7 +867,7 @@ static void send_set_mps_ctrl_addr(const char *addr, struct mpoa_client *mpc)
817{ 867{
818 struct k_message mesg; 868 struct k_message mesg;
819 869
820 memcpy (mpc->mps_ctrl_addr, addr, ATM_ESA_LEN); 870 memcpy(mpc->mps_ctrl_addr, addr, ATM_ESA_LEN);
821 871
822 mesg.type = SET_MPS_CTRL_ADDR; 872 mesg.type = SET_MPS_CTRL_ADDR;
823 memcpy(mesg.MPS_ctrl, addr, ATM_ESA_LEN); 873 memcpy(mesg.MPS_ctrl, addr, ATM_ESA_LEN);
@@ -833,11 +883,11 @@ static void mpoad_close(struct atm_vcc *vcc)
833 883
834 mpc = find_mpc_by_vcc(vcc); 884 mpc = find_mpc_by_vcc(vcc);
835 if (mpc == NULL) { 885 if (mpc == NULL) {
836 printk("mpoa: mpoad_close: did not find MPC\n"); 886 pr_info("did not find MPC\n");
837 return; 887 return;
838 } 888 }
839 if (!mpc->mpoad_vcc) { 889 if (!mpc->mpoad_vcc) {
840 printk("mpoa: mpoad_close: close for non-present mpoad\n"); 890 pr_info("close for non-present mpoad\n");
841 return; 891 return;
842 } 892 }
843 893
@@ -857,7 +907,7 @@ static void mpoad_close(struct atm_vcc *vcc)
857 kfree_skb(skb); 907 kfree_skb(skb);
858 } 908 }
859 909
860 printk("mpoa: (%s) going down\n", 910 pr_info("(%s) going down\n",
861 (mpc->dev) ? mpc->dev->name : "<unknown>"); 911 (mpc->dev) ? mpc->dev->name : "<unknown>");
862 module_put(THIS_MODULE); 912 module_put(THIS_MODULE);
863 913
@@ -871,61 +921,61 @@ static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb)
871{ 921{
872 922
873 struct mpoa_client *mpc = find_mpc_by_vcc(vcc); 923 struct mpoa_client *mpc = find_mpc_by_vcc(vcc);
874 struct k_message *mesg = (struct k_message*)skb->data; 924 struct k_message *mesg = (struct k_message *)skb->data;
875 atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); 925 atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
876 926
877 if (mpc == NULL) { 927 if (mpc == NULL) {
878 printk("mpoa: msg_from_mpoad: no mpc found\n"); 928 pr_info("no mpc found\n");
879 return 0; 929 return 0;
880 } 930 }
881 dprintk("mpoa: (%s) msg_from_mpoad:", (mpc->dev) ? mpc->dev->name : "<unknown>"); 931 dprintk("(%s)", mpc->dev ? mpc->dev->name : "<unknown>");
882 switch(mesg->type) { 932 switch (mesg->type) {
883 case MPOA_RES_REPLY_RCVD: 933 case MPOA_RES_REPLY_RCVD:
884 dprintk(" mpoa_res_reply_rcvd\n"); 934 dprintk_cont("mpoa_res_reply_rcvd\n");
885 MPOA_res_reply_rcvd(mesg, mpc); 935 MPOA_res_reply_rcvd(mesg, mpc);
886 break; 936 break;
887 case MPOA_TRIGGER_RCVD: 937 case MPOA_TRIGGER_RCVD:
888 dprintk(" mpoa_trigger_rcvd\n"); 938 dprintk_cont("mpoa_trigger_rcvd\n");
889 MPOA_trigger_rcvd(mesg, mpc); 939 MPOA_trigger_rcvd(mesg, mpc);
890 break; 940 break;
891 case INGRESS_PURGE_RCVD: 941 case INGRESS_PURGE_RCVD:
892 dprintk(" nhrp_purge_rcvd\n"); 942 dprintk_cont("nhrp_purge_rcvd\n");
893 ingress_purge_rcvd(mesg, mpc); 943 ingress_purge_rcvd(mesg, mpc);
894 break; 944 break;
895 case EGRESS_PURGE_RCVD: 945 case EGRESS_PURGE_RCVD:
896 dprintk(" egress_purge_reply_rcvd\n"); 946 dprintk_cont("egress_purge_reply_rcvd\n");
897 egress_purge_rcvd(mesg, mpc); 947 egress_purge_rcvd(mesg, mpc);
898 break; 948 break;
899 case MPS_DEATH: 949 case MPS_DEATH:
900 dprintk(" mps_death\n"); 950 dprintk_cont("mps_death\n");
901 mps_death(mesg, mpc); 951 mps_death(mesg, mpc);
902 break; 952 break;
903 case CACHE_IMPOS_RCVD: 953 case CACHE_IMPOS_RCVD:
904 dprintk(" cache_impos_rcvd\n"); 954 dprintk_cont("cache_impos_rcvd\n");
905 MPOA_cache_impos_rcvd(mesg, mpc); 955 MPOA_cache_impos_rcvd(mesg, mpc);
906 break; 956 break;
907 case SET_MPC_CTRL_ADDR: 957 case SET_MPC_CTRL_ADDR:
908 dprintk(" set_mpc_ctrl_addr\n"); 958 dprintk_cont("set_mpc_ctrl_addr\n");
909 set_mpc_ctrl_addr_rcvd(mesg, mpc); 959 set_mpc_ctrl_addr_rcvd(mesg, mpc);
910 break; 960 break;
911 case SET_MPS_MAC_ADDR: 961 case SET_MPS_MAC_ADDR:
912 dprintk(" set_mps_mac_addr\n"); 962 dprintk_cont("set_mps_mac_addr\n");
913 set_mps_mac_addr_rcvd(mesg, mpc); 963 set_mps_mac_addr_rcvd(mesg, mpc);
914 break; 964 break;
915 case CLEAN_UP_AND_EXIT: 965 case CLEAN_UP_AND_EXIT:
916 dprintk(" clean_up_and_exit\n"); 966 dprintk_cont("clean_up_and_exit\n");
917 clean_up(mesg, mpc, DIE); 967 clean_up(mesg, mpc, DIE);
918 break; 968 break;
919 case RELOAD: 969 case RELOAD:
920 dprintk(" reload\n"); 970 dprintk_cont("reload\n");
921 clean_up(mesg, mpc, RELOAD); 971 clean_up(mesg, mpc, RELOAD);
922 break; 972 break;
923 case SET_MPC_PARAMS: 973 case SET_MPC_PARAMS:
924 dprintk(" set_mpc_params\n"); 974 dprintk_cont("set_mpc_params\n");
925 mpc->parameters = mesg->content.params; 975 mpc->parameters = mesg->content.params;
926 break; 976 break;
927 default: 977 default:
928 dprintk(" unknown message %d\n", mesg->type); 978 dprintk_cont("unknown message %d\n", mesg->type);
929 break; 979 break;
930 } 980 }
931 kfree_skb(skb); 981 kfree_skb(skb);
@@ -940,7 +990,7 @@ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc)
940 struct sock *sk; 990 struct sock *sk;
941 991
942 if (mpc == NULL || !mpc->mpoad_vcc) { 992 if (mpc == NULL || !mpc->mpoad_vcc) {
943 printk("mpoa: msg_to_mpoad: mesg %d to a non-existent mpoad\n", mesg->type); 993 pr_info("mesg %d to a non-existent mpoad\n", mesg->type);
944 return -ENXIO; 994 return -ENXIO;
945 } 995 }
946 996
@@ -958,7 +1008,8 @@ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc)
958 return 0; 1008 return 0;
959} 1009}
960 1010
961static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned long event, void *dev_ptr) 1011static int mpoa_event_listener(struct notifier_block *mpoa_notifier,
1012 unsigned long event, void *dev_ptr)
962{ 1013{
963 struct net_device *dev; 1014 struct net_device *dev;
964 struct mpoa_client *mpc; 1015 struct mpoa_client *mpc;
@@ -980,25 +1031,24 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned lo
980 priv->lane2_ops->associate_indicator = lane2_assoc_ind; 1031 priv->lane2_ops->associate_indicator = lane2_assoc_ind;
981 mpc = find_mpc_by_itfnum(priv->itfnum); 1032 mpc = find_mpc_by_itfnum(priv->itfnum);
982 if (mpc == NULL) { 1033 if (mpc == NULL) {
983 dprintk("mpoa: mpoa_event_listener: allocating new mpc for %s\n", 1034 dprintk("allocating new mpc for %s\n", dev->name);
984 dev->name);
985 mpc = alloc_mpc(); 1035 mpc = alloc_mpc();
986 if (mpc == NULL) { 1036 if (mpc == NULL) {
987 printk("mpoa: mpoa_event_listener: no new mpc"); 1037 pr_info("no new mpc");
988 break; 1038 break;
989 } 1039 }
990 } 1040 }
991 mpc->dev_num = priv->itfnum; 1041 mpc->dev_num = priv->itfnum;
992 mpc->dev = dev; 1042 mpc->dev = dev;
993 dev_hold(dev); 1043 dev_hold(dev);
994 dprintk("mpoa: (%s) was initialized\n", dev->name); 1044 dprintk("(%s) was initialized\n", dev->name);
995 break; 1045 break;
996 case NETDEV_UNREGISTER: 1046 case NETDEV_UNREGISTER:
997 /* the lec device was deallocated */ 1047 /* the lec device was deallocated */
998 mpc = find_mpc_by_lec(dev); 1048 mpc = find_mpc_by_lec(dev);
999 if (mpc == NULL) 1049 if (mpc == NULL)
1000 break; 1050 break;
1001 dprintk("mpoa: device (%s) was deallocated\n", dev->name); 1051 dprintk("device (%s) was deallocated\n", dev->name);
1002 stop_mpc(mpc); 1052 stop_mpc(mpc);
1003 dev_put(mpc->dev); 1053 dev_put(mpc->dev);
1004 mpc->dev = NULL; 1054 mpc->dev = NULL;
@@ -1008,9 +1058,8 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned lo
1008 mpc = find_mpc_by_lec(dev); 1058 mpc = find_mpc_by_lec(dev);
1009 if (mpc == NULL) 1059 if (mpc == NULL)
1010 break; 1060 break;
1011 if (mpc->mpoad_vcc != NULL) { 1061 if (mpc->mpoad_vcc != NULL)
1012 start_mpc(mpc, dev); 1062 start_mpc(mpc, dev);
1013 }
1014 break; 1063 break;
1015 case NETDEV_DOWN: 1064 case NETDEV_DOWN:
1016 /* the dev was ifconfig'ed down */ 1065 /* the dev was ifconfig'ed down */
@@ -1020,9 +1069,8 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned lo
1020 mpc = find_mpc_by_lec(dev); 1069 mpc = find_mpc_by_lec(dev);
1021 if (mpc == NULL) 1070 if (mpc == NULL)
1022 break; 1071 break;
1023 if (mpc->mpoad_vcc != NULL) { 1072 if (mpc->mpoad_vcc != NULL)
1024 stop_mpc(mpc); 1073 stop_mpc(mpc);
1025 }
1026 break; 1074 break;
1027 case NETDEV_REBOOT: 1075 case NETDEV_REBOOT:
1028 case NETDEV_CHANGE: 1076 case NETDEV_CHANGE:
@@ -1049,7 +1097,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1049 in_cache_entry *entry; 1097 in_cache_entry *entry;
1050 1098
1051 entry = mpc->in_ops->get(dst_ip, mpc); 1099 entry = mpc->in_ops->get(dst_ip, mpc);
1052 if(entry == NULL){ 1100 if (entry == NULL) {
1053 entry = mpc->in_ops->add_entry(dst_ip, mpc); 1101 entry = mpc->in_ops->add_entry(dst_ip, mpc);
1054 entry->entry_state = INGRESS_RESOLVING; 1102 entry->entry_state = INGRESS_RESOLVING;
1055 msg->type = SND_MPOA_RES_RQST; 1103 msg->type = SND_MPOA_RES_RQST;
@@ -1060,7 +1108,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1060 return; 1108 return;
1061 } 1109 }
1062 1110
1063 if(entry->entry_state == INGRESS_INVALID){ 1111 if (entry->entry_state == INGRESS_INVALID) {
1064 entry->entry_state = INGRESS_RESOLVING; 1112 entry->entry_state = INGRESS_RESOLVING;
1065 msg->type = SND_MPOA_RES_RQST; 1113 msg->type = SND_MPOA_RES_RQST;
1066 msg->content.in_info = entry->ctrl_info; 1114 msg->content.in_info = entry->ctrl_info;
@@ -1070,7 +1118,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1070 return; 1118 return;
1071 } 1119 }
1072 1120
1073 printk("mpoa: (%s) MPOA_trigger_rcvd: entry already in resolving state\n", 1121 pr_info("(%s) entry already in resolving state\n",
1074 (mpc->dev) ? mpc->dev->name : "<unknown>"); 1122 (mpc->dev) ? mpc->dev->name : "<unknown>");
1075 mpc->in_ops->put(entry); 1123 mpc->in_ops->put(entry);
1076 return; 1124 return;
@@ -1080,23 +1128,25 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1080 * Things get complicated because we have to check if there's an egress 1128 * Things get complicated because we have to check if there's an egress
1081 * shortcut with suitable traffic parameters we could use. 1129 * shortcut with suitable traffic parameters we could use.
1082 */ 1130 */
1083static void check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_client *client, in_cache_entry *entry) 1131static void check_qos_and_open_shortcut(struct k_message *msg,
1132 struct mpoa_client *client,
1133 in_cache_entry *entry)
1084{ 1134{
1085 __be32 dst_ip = msg->content.in_info.in_dst_ip; 1135 __be32 dst_ip = msg->content.in_info.in_dst_ip;
1086 struct atm_mpoa_qos *qos = atm_mpoa_search_qos(dst_ip); 1136 struct atm_mpoa_qos *qos = atm_mpoa_search_qos(dst_ip);
1087 eg_cache_entry *eg_entry = client->eg_ops->get_by_src_ip(dst_ip, client); 1137 eg_cache_entry *eg_entry = client->eg_ops->get_by_src_ip(dst_ip, client);
1088 1138
1089 if(eg_entry && eg_entry->shortcut){ 1139 if (eg_entry && eg_entry->shortcut) {
1090 if(eg_entry->shortcut->qos.txtp.traffic_class & 1140 if (eg_entry->shortcut->qos.txtp.traffic_class &
1091 msg->qos.txtp.traffic_class & 1141 msg->qos.txtp.traffic_class &
1092 (qos ? qos->qos.txtp.traffic_class : ATM_UBR | ATM_CBR)){ 1142 (qos ? qos->qos.txtp.traffic_class : ATM_UBR | ATM_CBR)) {
1093 if(eg_entry->shortcut->qos.txtp.traffic_class == ATM_UBR) 1143 if (eg_entry->shortcut->qos.txtp.traffic_class == ATM_UBR)
1094 entry->shortcut = eg_entry->shortcut; 1144 entry->shortcut = eg_entry->shortcut;
1095 else if(eg_entry->shortcut->qos.txtp.max_pcr > 0) 1145 else if (eg_entry->shortcut->qos.txtp.max_pcr > 0)
1096 entry->shortcut = eg_entry->shortcut; 1146 entry->shortcut = eg_entry->shortcut;
1097 } 1147 }
1098 if(entry->shortcut){ 1148 if (entry->shortcut) {
1099 dprintk("mpoa: (%s) using egress SVC to reach %pI4\n", 1149 dprintk("(%s) using egress SVC to reach %pI4\n",
1100 client->dev->name, &dst_ip); 1150 client->dev->name, &dst_ip);
1101 client->eg_ops->put(eg_entry); 1151 client->eg_ops->put(eg_entry);
1102 return; 1152 return;
@@ -1107,12 +1157,13 @@ static void check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_clien
1107 1157
1108 /* No luck in the egress cache we must open an ingress SVC */ 1158 /* No luck in the egress cache we must open an ingress SVC */
1109 msg->type = OPEN_INGRESS_SVC; 1159 msg->type = OPEN_INGRESS_SVC;
1110 if (qos && (qos->qos.txtp.traffic_class == msg->qos.txtp.traffic_class)) 1160 if (qos &&
1111 { 1161 (qos->qos.txtp.traffic_class == msg->qos.txtp.traffic_class)) {
1112 msg->qos = qos->qos; 1162 msg->qos = qos->qos;
1113 printk("mpoa: (%s) trying to get a CBR shortcut\n",client->dev->name); 1163 pr_info("(%s) trying to get a CBR shortcut\n",
1114 } 1164 client->dev->name);
1115 else memset(&msg->qos,0,sizeof(struct atm_qos)); 1165 } else
1166 memset(&msg->qos, 0, sizeof(struct atm_qos));
1116 msg_to_mpoad(msg, client); 1167 msg_to_mpoad(msg, client);
1117 return; 1168 return;
1118} 1169}
@@ -1122,17 +1173,19 @@ static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1122 __be32 dst_ip = msg->content.in_info.in_dst_ip; 1173 __be32 dst_ip = msg->content.in_info.in_dst_ip;
1123 in_cache_entry *entry = mpc->in_ops->get(dst_ip, mpc); 1174 in_cache_entry *entry = mpc->in_ops->get(dst_ip, mpc);
1124 1175
1125 dprintk("mpoa: (%s) MPOA_res_reply_rcvd: ip %pI4\n", 1176 dprintk("(%s) ip %pI4\n",
1126 mpc->dev->name, &dst_ip); 1177 mpc->dev->name, &dst_ip);
1127 ddprintk("mpoa: (%s) MPOA_res_reply_rcvd() entry = %p", mpc->dev->name, entry); 1178 ddprintk("(%s) entry = %p",
1128 if(entry == NULL){ 1179 mpc->dev->name, entry);
1129 printk("\nmpoa: (%s) ARGH, received res. reply for an entry that doesn't exist.\n", mpc->dev->name); 1180 if (entry == NULL) {
1181 pr_info("(%s) ARGH, received res. reply for an entry that doesn't exist.\n",
1182 mpc->dev->name);
1130 return; 1183 return;
1131 } 1184 }
1132 ddprintk(" entry_state = %d ", entry->entry_state); 1185 ddprintk_cont(" entry_state = %d ", entry->entry_state);
1133 1186
1134 if (entry->entry_state == INGRESS_RESOLVED) { 1187 if (entry->entry_state == INGRESS_RESOLVED) {
1135 printk("\nmpoa: (%s) MPOA_res_reply_rcvd for RESOLVED entry!\n", mpc->dev->name); 1188 pr_info("(%s) RESOLVED entry!\n", mpc->dev->name);
1136 mpc->in_ops->put(entry); 1189 mpc->in_ops->put(entry);
1137 return; 1190 return;
1138 } 1191 }
@@ -1141,17 +1194,18 @@ static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1141 do_gettimeofday(&(entry->tv)); 1194 do_gettimeofday(&(entry->tv));
1142 do_gettimeofday(&(entry->reply_wait)); /* Used in refreshing func from now on */ 1195 do_gettimeofday(&(entry->reply_wait)); /* Used in refreshing func from now on */
1143 entry->refresh_time = 0; 1196 entry->refresh_time = 0;
1144 ddprintk("entry->shortcut = %p\n", entry->shortcut); 1197 ddprintk_cont("entry->shortcut = %p\n", entry->shortcut);
1145 1198
1146 if(entry->entry_state == INGRESS_RESOLVING && entry->shortcut != NULL){ 1199 if (entry->entry_state == INGRESS_RESOLVING &&
1200 entry->shortcut != NULL) {
1147 entry->entry_state = INGRESS_RESOLVED; 1201 entry->entry_state = INGRESS_RESOLVED;
1148 mpc->in_ops->put(entry); 1202 mpc->in_ops->put(entry);
1149 return; /* Shortcut already open... */ 1203 return; /* Shortcut already open... */
1150 } 1204 }
1151 1205
1152 if (entry->shortcut != NULL) { 1206 if (entry->shortcut != NULL) {
1153 printk("mpoa: (%s) MPOA_res_reply_rcvd: entry->shortcut != NULL, impossible!\n", 1207 pr_info("(%s) entry->shortcut != NULL, impossible!\n",
1154 mpc->dev->name); 1208 mpc->dev->name);
1155 mpc->in_ops->put(entry); 1209 mpc->in_ops->put(entry);
1156 return; 1210 return;
1157 } 1211 }
@@ -1170,14 +1224,14 @@ static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1170 __be32 mask = msg->ip_mask; 1224 __be32 mask = msg->ip_mask;
1171 in_cache_entry *entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask); 1225 in_cache_entry *entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask);
1172 1226
1173 if(entry == NULL){ 1227 if (entry == NULL) {
1174 printk("mpoa: (%s) ingress_purge_rcvd: purge for a non-existing entry, ip = %pI4\n", 1228 pr_info("(%s) purge for a non-existing entry, ip = %pI4\n",
1175 mpc->dev->name, &dst_ip); 1229 mpc->dev->name, &dst_ip);
1176 return; 1230 return;
1177 } 1231 }
1178 1232
1179 do { 1233 do {
1180 dprintk("mpoa: (%s) ingress_purge_rcvd: removing an ingress entry, ip = %pI4\n", 1234 dprintk("(%s) removing an ingress entry, ip = %pI4\n",
1181 mpc->dev->name, &dst_ip); 1235 mpc->dev->name, &dst_ip);
1182 write_lock_bh(&mpc->ingress_lock); 1236 write_lock_bh(&mpc->ingress_lock);
1183 mpc->in_ops->remove_entry(entry, mpc); 1237 mpc->in_ops->remove_entry(entry, mpc);
@@ -1195,7 +1249,8 @@ static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1195 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(cache_id, mpc); 1249 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(cache_id, mpc);
1196 1250
1197 if (entry == NULL) { 1251 if (entry == NULL) {
1198 dprintk("mpoa: (%s) egress_purge_rcvd: purge for a non-existing entry\n", mpc->dev->name); 1252 dprintk("(%s) purge for a non-existing entry\n",
1253 mpc->dev->name);
1199 return; 1254 return;
1200 } 1255 }
1201 1256
@@ -1214,15 +1269,15 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
1214 struct k_message *purge_msg; 1269 struct k_message *purge_msg;
1215 struct sk_buff *skb; 1270 struct sk_buff *skb;
1216 1271
1217 dprintk("mpoa: purge_egress_shortcut: entering\n"); 1272 dprintk("entering\n");
1218 if (vcc == NULL) { 1273 if (vcc == NULL) {
1219 printk("mpoa: purge_egress_shortcut: vcc == NULL\n"); 1274 pr_info("vcc == NULL\n");
1220 return; 1275 return;
1221 } 1276 }
1222 1277
1223 skb = alloc_skb(sizeof(struct k_message), GFP_ATOMIC); 1278 skb = alloc_skb(sizeof(struct k_message), GFP_ATOMIC);
1224 if (skb == NULL) { 1279 if (skb == NULL) {
1225 printk("mpoa: purge_egress_shortcut: out of memory\n"); 1280 pr_info("out of memory\n");
1226 return; 1281 return;
1227 } 1282 }
1228 1283
@@ -1238,7 +1293,7 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
1238 sk = sk_atm(vcc); 1293 sk = sk_atm(vcc);
1239 skb_queue_tail(&sk->sk_receive_queue, skb); 1294 skb_queue_tail(&sk->sk_receive_queue, skb);
1240 sk->sk_data_ready(sk, skb->len); 1295 sk->sk_data_ready(sk, skb->len);
1241 dprintk("mpoa: purge_egress_shortcut: exiting:\n"); 1296 dprintk("exiting\n");
1242 1297
1243 return; 1298 return;
1244} 1299}
@@ -1247,14 +1302,14 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
1247 * Our MPS died. Tell our daemon to send NHRP data plane purge to each 1302 * Our MPS died. Tell our daemon to send NHRP data plane purge to each
1248 * of the egress shortcuts we have. 1303 * of the egress shortcuts we have.
1249 */ 1304 */
1250static void mps_death( struct k_message * msg, struct mpoa_client * mpc ) 1305static void mps_death(struct k_message *msg, struct mpoa_client *mpc)
1251{ 1306{
1252 eg_cache_entry *entry; 1307 eg_cache_entry *entry;
1253 1308
1254 dprintk("mpoa: (%s) mps_death:\n", mpc->dev->name); 1309 dprintk("(%s)\n", mpc->dev->name);
1255 1310
1256 if(memcmp(msg->MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN)){ 1311 if (memcmp(msg->MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN)) {
1257 printk("mpoa: (%s) mps_death: wrong MPS\n", mpc->dev->name); 1312 pr_info("(%s) wrong MPS\n", mpc->dev->name);
1258 return; 1313 return;
1259 } 1314 }
1260 1315
@@ -1273,20 +1328,21 @@ static void mps_death( struct k_message * msg, struct mpoa_client * mpc )
1273 return; 1328 return;
1274} 1329}
1275 1330
1276static void MPOA_cache_impos_rcvd( struct k_message * msg, struct mpoa_client * mpc) 1331static void MPOA_cache_impos_rcvd(struct k_message *msg,
1332 struct mpoa_client *mpc)
1277{ 1333{
1278 uint16_t holding_time; 1334 uint16_t holding_time;
1279 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(msg->content.eg_info.cache_id, mpc); 1335 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(msg->content.eg_info.cache_id, mpc);
1280 1336
1281 holding_time = msg->content.eg_info.holding_time; 1337 holding_time = msg->content.eg_info.holding_time;
1282 dprintk("mpoa: (%s) MPOA_cache_impos_rcvd: entry = %p, holding_time = %u\n", 1338 dprintk("(%s) entry = %p, holding_time = %u\n",
1283 mpc->dev->name, entry, holding_time); 1339 mpc->dev->name, entry, holding_time);
1284 if(entry == NULL && holding_time) { 1340 if (entry == NULL && holding_time) {
1285 entry = mpc->eg_ops->add_entry(msg, mpc); 1341 entry = mpc->eg_ops->add_entry(msg, mpc);
1286 mpc->eg_ops->put(entry); 1342 mpc->eg_ops->put(entry);
1287 return; 1343 return;
1288 } 1344 }
1289 if(holding_time){ 1345 if (holding_time) {
1290 mpc->eg_ops->update(entry, holding_time); 1346 mpc->eg_ops->update(entry, holding_time);
1291 return; 1347 return;
1292 } 1348 }
@@ -1300,7 +1356,8 @@ static void MPOA_cache_impos_rcvd( struct k_message * msg, struct mpoa_client *
1300 return; 1356 return;
1301} 1357}
1302 1358
1303static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, struct mpoa_client *mpc) 1359static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg,
1360 struct mpoa_client *mpc)
1304{ 1361{
1305 struct lec_priv *priv; 1362 struct lec_priv *priv;
1306 int i, retval ; 1363 int i, retval ;
@@ -1315,34 +1372,39 @@ static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, struct mpoa_client *m
1315 memcpy(&tlv[7], mesg->MPS_ctrl, ATM_ESA_LEN); /* MPC ctrl ATM addr */ 1372 memcpy(&tlv[7], mesg->MPS_ctrl, ATM_ESA_LEN); /* MPC ctrl ATM addr */
1316 memcpy(mpc->our_ctrl_addr, mesg->MPS_ctrl, ATM_ESA_LEN); 1373 memcpy(mpc->our_ctrl_addr, mesg->MPS_ctrl, ATM_ESA_LEN);
1317 1374
1318 dprintk("mpoa: (%s) setting MPC ctrl ATM address to ", 1375 dprintk("(%s) setting MPC ctrl ATM address to",
1319 (mpc->dev) ? mpc->dev->name : "<unknown>"); 1376 mpc->dev ? mpc->dev->name : "<unknown>");
1320 for (i = 7; i < sizeof(tlv); i++) 1377 for (i = 7; i < sizeof(tlv); i++)
1321 dprintk("%02x ", tlv[i]); 1378 dprintk_cont(" %02x", tlv[i]);
1322 dprintk("\n"); 1379 dprintk_cont("\n");
1323 1380
1324 if (mpc->dev) { 1381 if (mpc->dev) {
1325 priv = netdev_priv(mpc->dev); 1382 priv = netdev_priv(mpc->dev);
1326 retval = priv->lane2_ops->associate_req(mpc->dev, mpc->dev->dev_addr, tlv, sizeof(tlv)); 1383 retval = priv->lane2_ops->associate_req(mpc->dev,
1384 mpc->dev->dev_addr,
1385 tlv, sizeof(tlv));
1327 if (retval == 0) 1386 if (retval == 0)
1328 printk("mpoa: (%s) MPOA device type TLV association failed\n", mpc->dev->name); 1387 pr_info("(%s) MPOA device type TLV association failed\n",
1388 mpc->dev->name);
1329 retval = priv->lane2_ops->resolve(mpc->dev, NULL, 1, NULL, NULL); 1389 retval = priv->lane2_ops->resolve(mpc->dev, NULL, 1, NULL, NULL);
1330 if (retval < 0) 1390 if (retval < 0)
1331 printk("mpoa: (%s) targetless LE_ARP request failed\n", mpc->dev->name); 1391 pr_info("(%s) targetless LE_ARP request failed\n",
1392 mpc->dev->name);
1332 } 1393 }
1333 1394
1334 return; 1395 return;
1335} 1396}
1336 1397
1337static void set_mps_mac_addr_rcvd(struct k_message *msg, struct mpoa_client *client) 1398static void set_mps_mac_addr_rcvd(struct k_message *msg,
1399 struct mpoa_client *client)
1338{ 1400{
1339 1401
1340 if(client->number_of_mps_macs) 1402 if (client->number_of_mps_macs)
1341 kfree(client->mps_macs); 1403 kfree(client->mps_macs);
1342 client->number_of_mps_macs = 0; 1404 client->number_of_mps_macs = 0;
1343 client->mps_macs = kmemdup(msg->MPS_ctrl, ETH_ALEN, GFP_KERNEL); 1405 client->mps_macs = kmemdup(msg->MPS_ctrl, ETH_ALEN, GFP_KERNEL);
1344 if (client->mps_macs == NULL) { 1406 if (client->mps_macs == NULL) {
1345 printk("mpoa: set_mps_mac_addr_rcvd: out of memory\n"); 1407 pr_info("out of memory\n");
1346 return; 1408 return;
1347 } 1409 }
1348 client->number_of_mps_macs = 1; 1410 client->number_of_mps_macs = 1;
@@ -1363,11 +1425,11 @@ static void clean_up(struct k_message *msg, struct mpoa_client *mpc, int action)
1363 /* FIXME: This knows too much of the cache structure */ 1425 /* FIXME: This knows too much of the cache structure */
1364 read_lock_irq(&mpc->egress_lock); 1426 read_lock_irq(&mpc->egress_lock);
1365 entry = mpc->eg_cache; 1427 entry = mpc->eg_cache;
1366 while (entry != NULL){ 1428 while (entry != NULL) {
1367 msg->content.eg_info = entry->ctrl_info; 1429 msg->content.eg_info = entry->ctrl_info;
1368 dprintk("mpoa: cache_id %u\n", entry->ctrl_info.cache_id); 1430 dprintk("cache_id %u\n", entry->ctrl_info.cache_id);
1369 msg_to_mpoad(msg, mpc); 1431 msg_to_mpoad(msg, mpc);
1370 entry = entry->next; 1432 entry = entry->next;
1371 } 1433 }
1372 read_unlock_irq(&mpc->egress_lock); 1434 read_unlock_irq(&mpc->egress_lock);
1373 1435
@@ -1386,20 +1448,22 @@ static void mpc_timer_refresh(void)
1386 return; 1448 return;
1387} 1449}
1388 1450
1389static void mpc_cache_check( unsigned long checking_time ) 1451static void mpc_cache_check(unsigned long checking_time)
1390{ 1452{
1391 struct mpoa_client *mpc = mpcs; 1453 struct mpoa_client *mpc = mpcs;
1392 static unsigned long previous_resolving_check_time; 1454 static unsigned long previous_resolving_check_time;
1393 static unsigned long previous_refresh_time; 1455 static unsigned long previous_refresh_time;
1394 1456
1395 while( mpc != NULL ){ 1457 while (mpc != NULL) {
1396 mpc->in_ops->clear_count(mpc); 1458 mpc->in_ops->clear_count(mpc);
1397 mpc->eg_ops->clear_expired(mpc); 1459 mpc->eg_ops->clear_expired(mpc);
1398 if(checking_time - previous_resolving_check_time > mpc->parameters.mpc_p4 * HZ ){ 1460 if (checking_time - previous_resolving_check_time >
1461 mpc->parameters.mpc_p4 * HZ) {
1399 mpc->in_ops->check_resolving(mpc); 1462 mpc->in_ops->check_resolving(mpc);
1400 previous_resolving_check_time = checking_time; 1463 previous_resolving_check_time = checking_time;
1401 } 1464 }
1402 if(checking_time - previous_refresh_time > mpc->parameters.mpc_p5 * HZ ){ 1465 if (checking_time - previous_refresh_time >
1466 mpc->parameters.mpc_p5 * HZ) {
1403 mpc->in_ops->refresh(mpc); 1467 mpc->in_ops->refresh(mpc);
1404 previous_refresh_time = checking_time; 1468 previous_refresh_time = checking_time;
1405 } 1469 }
@@ -1410,7 +1474,8 @@ static void mpc_cache_check( unsigned long checking_time )
1410 return; 1474 return;
1411} 1475}
1412 1476
1413static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 1477static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd,
1478 unsigned long arg)
1414{ 1479{
1415 int err = 0; 1480 int err = 0;
1416 struct atm_vcc *vcc = ATM_SD(sock); 1481 struct atm_vcc *vcc = ATM_SD(sock);
@@ -1422,21 +1487,20 @@ static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd, unsigned long a
1422 return -EPERM; 1487 return -EPERM;
1423 1488
1424 switch (cmd) { 1489 switch (cmd) {
1425 case ATMMPC_CTRL: 1490 case ATMMPC_CTRL:
1426 err = atm_mpoa_mpoad_attach(vcc, (int)arg); 1491 err = atm_mpoa_mpoad_attach(vcc, (int)arg);
1427 if (err >= 0) 1492 if (err >= 0)
1428 sock->state = SS_CONNECTED; 1493 sock->state = SS_CONNECTED;
1429 break; 1494 break;
1430 case ATMMPC_DATA: 1495 case ATMMPC_DATA:
1431 err = atm_mpoa_vcc_attach(vcc, (void __user *)arg); 1496 err = atm_mpoa_vcc_attach(vcc, (void __user *)arg);
1432 break; 1497 break;
1433 default: 1498 default:
1434 break; 1499 break;
1435 } 1500 }
1436 return err; 1501 return err;
1437} 1502}
1438 1503
1439
1440static struct atm_ioctl atm_ioctl_ops = { 1504static struct atm_ioctl atm_ioctl_ops = {
1441 .owner = THIS_MODULE, 1505 .owner = THIS_MODULE,
1442 .ioctl = atm_mpoa_ioctl, 1506 .ioctl = atm_mpoa_ioctl,
@@ -1447,9 +1511,9 @@ static __init int atm_mpoa_init(void)
1447 register_atm_ioctl(&atm_ioctl_ops); 1511 register_atm_ioctl(&atm_ioctl_ops);
1448 1512
1449 if (mpc_proc_init() != 0) 1513 if (mpc_proc_init() != 0)
1450 printk(KERN_INFO "mpoa: failed to initialize /proc/mpoa\n"); 1514 pr_info("failed to initialize /proc/mpoa\n");
1451 1515
1452 printk("mpc.c: " __DATE__ " " __TIME__ " initialized\n"); 1516 pr_info("mpc.c: " __DATE__ " " __TIME__ " initialized\n");
1453 1517
1454 return 0; 1518 return 0;
1455} 1519}
@@ -1476,15 +1540,15 @@ static void __exit atm_mpoa_cleanup(void)
1476 if (priv->lane2_ops != NULL) 1540 if (priv->lane2_ops != NULL)
1477 priv->lane2_ops->associate_indicator = NULL; 1541 priv->lane2_ops->associate_indicator = NULL;
1478 } 1542 }
1479 ddprintk("mpoa: cleanup_module: about to clear caches\n"); 1543 ddprintk("about to clear caches\n");
1480 mpc->in_ops->destroy_cache(mpc); 1544 mpc->in_ops->destroy_cache(mpc);
1481 mpc->eg_ops->destroy_cache(mpc); 1545 mpc->eg_ops->destroy_cache(mpc);
1482 ddprintk("mpoa: cleanup_module: caches cleared\n"); 1546 ddprintk("caches cleared\n");
1483 kfree(mpc->mps_macs); 1547 kfree(mpc->mps_macs);
1484 memset(mpc, 0, sizeof(struct mpoa_client)); 1548 memset(mpc, 0, sizeof(struct mpoa_client));
1485 ddprintk("mpoa: cleanup_module: about to kfree %p\n", mpc); 1549 ddprintk("about to kfree %p\n", mpc);
1486 kfree(mpc); 1550 kfree(mpc);
1487 ddprintk("mpoa: cleanup_module: next mpc is at %p\n", tmp); 1551 ddprintk("next mpc is at %p\n", tmp);
1488 mpc = tmp; 1552 mpc = tmp;
1489 } 1553 }
1490 1554
@@ -1492,7 +1556,7 @@ static void __exit atm_mpoa_cleanup(void)
1492 qos_head = NULL; 1556 qos_head = NULL;
1493 while (qos != NULL) { 1557 while (qos != NULL) {
1494 nextqos = qos->next; 1558 nextqos = qos->next;
1495 dprintk("mpoa: cleanup_module: freeing qos entry %p\n", qos); 1559 dprintk("freeing qos entry %p\n", qos);
1496 kfree(qos); 1560 kfree(qos);
1497 qos = nextqos; 1561 qos = nextqos;
1498 } 1562 }
diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
index 4504a4b339bb..4c141810eb6d 100644
--- a/net/atm/mpoa_caches.c
+++ b/net/atm/mpoa_caches.c
@@ -11,15 +11,23 @@
11 */ 11 */
12 12
13#if 0 13#if 0
14#define dprintk printk /* debug */ 14#define dprintk(format, args...) \
15 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
15#else 16#else
16#define dprintk(format,args...) 17#define dprintk(format, args...) \
18 do { if (0) \
19 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
20 } while (0)
17#endif 21#endif
18 22
19#if 0 23#if 0
20#define ddprintk printk /* more debug */ 24#define ddprintk(format, args...) \
25 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
21#else 26#else
22#define ddprintk(format,args...) 27#define ddprintk(format, args...) \
28 do { if (0) \
29 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
30 } while (0)
23#endif 31#endif
24 32
25static in_cache_entry *in_cache_get(__be32 dst_ip, 33static in_cache_entry *in_cache_get(__be32 dst_ip,
@@ -29,8 +37,8 @@ static in_cache_entry *in_cache_get(__be32 dst_ip,
29 37
30 read_lock_bh(&client->ingress_lock); 38 read_lock_bh(&client->ingress_lock);
31 entry = client->in_cache; 39 entry = client->in_cache;
32 while(entry != NULL){ 40 while (entry != NULL) {
33 if( entry->ctrl_info.in_dst_ip == dst_ip ){ 41 if (entry->ctrl_info.in_dst_ip == dst_ip) {
34 atomic_inc(&entry->use); 42 atomic_inc(&entry->use);
35 read_unlock_bh(&client->ingress_lock); 43 read_unlock_bh(&client->ingress_lock);
36 return entry; 44 return entry;
@@ -50,8 +58,8 @@ static in_cache_entry *in_cache_get_with_mask(__be32 dst_ip,
50 58
51 read_lock_bh(&client->ingress_lock); 59 read_lock_bh(&client->ingress_lock);
52 entry = client->in_cache; 60 entry = client->in_cache;
53 while(entry != NULL){ 61 while (entry != NULL) {
54 if((entry->ctrl_info.in_dst_ip & mask) == (dst_ip & mask )){ 62 if ((entry->ctrl_info.in_dst_ip & mask) == (dst_ip & mask)) {
55 atomic_inc(&entry->use); 63 atomic_inc(&entry->use);
56 read_unlock_bh(&client->ingress_lock); 64 read_unlock_bh(&client->ingress_lock);
57 return entry; 65 return entry;
@@ -65,14 +73,14 @@ static in_cache_entry *in_cache_get_with_mask(__be32 dst_ip,
65} 73}
66 74
67static in_cache_entry *in_cache_get_by_vcc(struct atm_vcc *vcc, 75static in_cache_entry *in_cache_get_by_vcc(struct atm_vcc *vcc,
68 struct mpoa_client *client ) 76 struct mpoa_client *client)
69{ 77{
70 in_cache_entry *entry; 78 in_cache_entry *entry;
71 79
72 read_lock_bh(&client->ingress_lock); 80 read_lock_bh(&client->ingress_lock);
73 entry = client->in_cache; 81 entry = client->in_cache;
74 while(entry != NULL){ 82 while (entry != NULL) {
75 if(entry->shortcut == vcc) { 83 if (entry->shortcut == vcc) {
76 atomic_inc(&entry->use); 84 atomic_inc(&entry->use);
77 read_unlock_bh(&client->ingress_lock); 85 read_unlock_bh(&client->ingress_lock);
78 return entry; 86 return entry;
@@ -90,14 +98,14 @@ static in_cache_entry *in_cache_add_entry(__be32 dst_ip,
90 in_cache_entry *entry = kzalloc(sizeof(in_cache_entry), GFP_KERNEL); 98 in_cache_entry *entry = kzalloc(sizeof(in_cache_entry), GFP_KERNEL);
91 99
92 if (entry == NULL) { 100 if (entry == NULL) {
93 printk("mpoa: mpoa_caches.c: new_in_cache_entry: out of memory\n"); 101 pr_info("mpoa: mpoa_caches.c: new_in_cache_entry: out of memory\n");
94 return NULL; 102 return NULL;
95 } 103 }
96 104
97 dprintk("mpoa: mpoa_caches.c: adding an ingress entry, ip = %pI4\n", &dst_ip); 105 dprintk("adding an ingress entry, ip = %pI4\n", &dst_ip);
98 106
99 atomic_set(&entry->use, 1); 107 atomic_set(&entry->use, 1);
100 dprintk("mpoa: mpoa_caches.c: new_in_cache_entry: about to lock\n"); 108 dprintk("new_in_cache_entry: about to lock\n");
101 write_lock_bh(&client->ingress_lock); 109 write_lock_bh(&client->ingress_lock);
102 entry->next = client->in_cache; 110 entry->next = client->in_cache;
103 entry->prev = NULL; 111 entry->prev = NULL;
@@ -115,7 +123,7 @@ static in_cache_entry *in_cache_add_entry(__be32 dst_ip,
115 atomic_inc(&entry->use); 123 atomic_inc(&entry->use);
116 124
117 write_unlock_bh(&client->ingress_lock); 125 write_unlock_bh(&client->ingress_lock);
118 dprintk("mpoa: mpoa_caches.c: new_in_cache_entry: unlocked\n"); 126 dprintk("new_in_cache_entry: unlocked\n");
119 127
120 return entry; 128 return entry;
121} 129}
@@ -126,39 +134,41 @@ static int cache_hit(in_cache_entry *entry, struct mpoa_client *mpc)
126 struct k_message msg; 134 struct k_message msg;
127 135
128 entry->count++; 136 entry->count++;
129 if(entry->entry_state == INGRESS_RESOLVED && entry->shortcut != NULL) 137 if (entry->entry_state == INGRESS_RESOLVED && entry->shortcut != NULL)
130 return OPEN; 138 return OPEN;
131 139
132 if(entry->entry_state == INGRESS_REFRESHING){ 140 if (entry->entry_state == INGRESS_REFRESHING) {
133 if(entry->count > mpc->parameters.mpc_p1){ 141 if (entry->count > mpc->parameters.mpc_p1) {
134 msg.type = SND_MPOA_RES_RQST; 142 msg.type = SND_MPOA_RES_RQST;
135 msg.content.in_info = entry->ctrl_info; 143 msg.content.in_info = entry->ctrl_info;
136 memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN); 144 memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN);
137 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip); 145 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip);
138 if (qos != NULL) msg.qos = qos->qos; 146 if (qos != NULL)
147 msg.qos = qos->qos;
139 msg_to_mpoad(&msg, mpc); 148 msg_to_mpoad(&msg, mpc);
140 do_gettimeofday(&(entry->reply_wait)); 149 do_gettimeofday(&(entry->reply_wait));
141 entry->entry_state = INGRESS_RESOLVING; 150 entry->entry_state = INGRESS_RESOLVING;
142 } 151 }
143 if(entry->shortcut != NULL) 152 if (entry->shortcut != NULL)
144 return OPEN; 153 return OPEN;
145 return CLOSED; 154 return CLOSED;
146 } 155 }
147 156
148 if(entry->entry_state == INGRESS_RESOLVING && entry->shortcut != NULL) 157 if (entry->entry_state == INGRESS_RESOLVING && entry->shortcut != NULL)
149 return OPEN; 158 return OPEN;
150 159
151 if( entry->count > mpc->parameters.mpc_p1 && 160 if (entry->count > mpc->parameters.mpc_p1 &&
152 entry->entry_state == INGRESS_INVALID){ 161 entry->entry_state == INGRESS_INVALID) {
153 dprintk("mpoa: (%s) mpoa_caches.c: threshold exceeded for ip %pI4, sending MPOA res req\n", 162 dprintk("(%s) threshold exceeded for ip %pI4, sending MPOA res req\n",
154 mpc->dev->name, &entry->ctrl_info.in_dst_ip); 163 mpc->dev->name, &entry->ctrl_info.in_dst_ip);
155 entry->entry_state = INGRESS_RESOLVING; 164 entry->entry_state = INGRESS_RESOLVING;
156 msg.type = SND_MPOA_RES_RQST; 165 msg.type = SND_MPOA_RES_RQST;
157 memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN ); 166 memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN);
158 msg.content.in_info = entry->ctrl_info; 167 msg.content.in_info = entry->ctrl_info;
159 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip); 168 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip);
160 if (qos != NULL) msg.qos = qos->qos; 169 if (qos != NULL)
161 msg_to_mpoad( &msg, mpc); 170 msg.qos = qos->qos;
171 msg_to_mpoad(&msg, mpc);
162 do_gettimeofday(&(entry->reply_wait)); 172 do_gettimeofday(&(entry->reply_wait));
163 } 173 }
164 174
@@ -185,7 +195,7 @@ static void in_cache_remove_entry(in_cache_entry *entry,
185 struct k_message msg; 195 struct k_message msg;
186 196
187 vcc = entry->shortcut; 197 vcc = entry->shortcut;
188 dprintk("mpoa: mpoa_caches.c: removing an ingress entry, ip = %pI4\n", 198 dprintk("removing an ingress entry, ip = %pI4\n",
189 &entry->ctrl_info.in_dst_ip); 199 &entry->ctrl_info.in_dst_ip);
190 200
191 if (entry->prev != NULL) 201 if (entry->prev != NULL)
@@ -195,14 +205,15 @@ static void in_cache_remove_entry(in_cache_entry *entry,
195 if (entry->next != NULL) 205 if (entry->next != NULL)
196 entry->next->prev = entry->prev; 206 entry->next->prev = entry->prev;
197 client->in_ops->put(entry); 207 client->in_ops->put(entry);
198 if(client->in_cache == NULL && client->eg_cache == NULL){ 208 if (client->in_cache == NULL && client->eg_cache == NULL) {
199 msg.type = STOP_KEEP_ALIVE_SM; 209 msg.type = STOP_KEEP_ALIVE_SM;
200 msg_to_mpoad(&msg,client); 210 msg_to_mpoad(&msg, client);
201 } 211 }
202 212
203 /* Check if the egress side still uses this VCC */ 213 /* Check if the egress side still uses this VCC */
204 if (vcc != NULL) { 214 if (vcc != NULL) {
205 eg_cache_entry *eg_entry = client->eg_ops->get_by_vcc(vcc, client); 215 eg_cache_entry *eg_entry = client->eg_ops->get_by_vcc(vcc,
216 client);
206 if (eg_entry != NULL) { 217 if (eg_entry != NULL) {
207 client->eg_ops->put(eg_entry); 218 client->eg_ops->put(eg_entry);
208 return; 219 return;
@@ -213,7 +224,6 @@ static void in_cache_remove_entry(in_cache_entry *entry,
213 return; 224 return;
214} 225}
215 226
216
217/* Call this every MPC-p2 seconds... Not exactly correct solution, 227/* Call this every MPC-p2 seconds... Not exactly correct solution,
218 but an easy one... */ 228 but an easy one... */
219static void clear_count_and_expired(struct mpoa_client *client) 229static void clear_count_and_expired(struct mpoa_client *client)
@@ -225,12 +235,12 @@ static void clear_count_and_expired(struct mpoa_client *client)
225 235
226 write_lock_bh(&client->ingress_lock); 236 write_lock_bh(&client->ingress_lock);
227 entry = client->in_cache; 237 entry = client->in_cache;
228 while(entry != NULL){ 238 while (entry != NULL) {
229 entry->count=0; 239 entry->count = 0;
230 next_entry = entry->next; 240 next_entry = entry->next;
231 if((now.tv_sec - entry->tv.tv_sec) 241 if ((now.tv_sec - entry->tv.tv_sec)
232 > entry->ctrl_info.holding_time){ 242 > entry->ctrl_info.holding_time) {
233 dprintk("mpoa: mpoa_caches.c: holding time expired, ip = %pI4\n", 243 dprintk("holding time expired, ip = %pI4\n",
234 &entry->ctrl_info.in_dst_ip); 244 &entry->ctrl_info.in_dst_ip);
235 client->in_ops->remove_entry(entry, client); 245 client->in_ops->remove_entry(entry, client);
236 } 246 }
@@ -250,33 +260,38 @@ static void check_resolving_entries(struct mpoa_client *client)
250 struct timeval now; 260 struct timeval now;
251 struct k_message msg; 261 struct k_message msg;
252 262
253 do_gettimeofday( &now ); 263 do_gettimeofday(&now);
254 264
255 read_lock_bh(&client->ingress_lock); 265 read_lock_bh(&client->ingress_lock);
256 entry = client->in_cache; 266 entry = client->in_cache;
257 while( entry != NULL ){ 267 while (entry != NULL) {
258 if(entry->entry_state == INGRESS_RESOLVING){ 268 if (entry->entry_state == INGRESS_RESOLVING) {
259 if(now.tv_sec - entry->hold_down.tv_sec < client->parameters.mpc_p6){ 269 if ((now.tv_sec - entry->hold_down.tv_sec) <
260 entry = entry->next; /* Entry in hold down */ 270 client->parameters.mpc_p6) {
271 entry = entry->next; /* Entry in hold down */
261 continue; 272 continue;
262 } 273 }
263 if( (now.tv_sec - entry->reply_wait.tv_sec) > 274 if ((now.tv_sec - entry->reply_wait.tv_sec) >
264 entry->retry_time ){ 275 entry->retry_time) {
265 entry->retry_time = MPC_C1*( entry->retry_time ); 276 entry->retry_time = MPC_C1 * (entry->retry_time);
266 if(entry->retry_time > client->parameters.mpc_p5){ 277 /*
267 /* Retry time maximum exceeded, put entry in hold down. */ 278 * Retry time maximum exceeded,
279 * put entry in hold down.
280 */
281 if (entry->retry_time > client->parameters.mpc_p5) {
268 do_gettimeofday(&(entry->hold_down)); 282 do_gettimeofday(&(entry->hold_down));
269 entry->retry_time = client->parameters.mpc_p4; 283 entry->retry_time = client->parameters.mpc_p4;
270 entry = entry->next; 284 entry = entry->next;
271 continue; 285 continue;
272 } 286 }
273 /* Ask daemon to send a resolution request. */ 287 /* Ask daemon to send a resolution request. */
274 memset(&(entry->hold_down),0,sizeof(struct timeval)); 288 memset(&(entry->hold_down), 0, sizeof(struct timeval));
275 msg.type = SND_MPOA_RES_RTRY; 289 msg.type = SND_MPOA_RES_RTRY;
276 memcpy(msg.MPS_ctrl, client->mps_ctrl_addr, ATM_ESA_LEN); 290 memcpy(msg.MPS_ctrl, client->mps_ctrl_addr, ATM_ESA_LEN);
277 msg.content.in_info = entry->ctrl_info; 291 msg.content.in_info = entry->ctrl_info;
278 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip); 292 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip);
279 if (qos != NULL) msg.qos = qos->qos; 293 if (qos != NULL)
294 msg.qos = qos->qos;
280 msg_to_mpoad(&msg, client); 295 msg_to_mpoad(&msg, client);
281 do_gettimeofday(&(entry->reply_wait)); 296 do_gettimeofday(&(entry->reply_wait));
282 } 297 }
@@ -292,16 +307,17 @@ static void refresh_entries(struct mpoa_client *client)
292 struct timeval now; 307 struct timeval now;
293 struct in_cache_entry *entry = client->in_cache; 308 struct in_cache_entry *entry = client->in_cache;
294 309
295 ddprintk("mpoa: mpoa_caches.c: refresh_entries\n"); 310 ddprintk("refresh_entries\n");
296 do_gettimeofday(&now); 311 do_gettimeofday(&now);
297 312
298 read_lock_bh(&client->ingress_lock); 313 read_lock_bh(&client->ingress_lock);
299 while( entry != NULL ){ 314 while (entry != NULL) {
300 if( entry->entry_state == INGRESS_RESOLVED ){ 315 if (entry->entry_state == INGRESS_RESOLVED) {
301 if(!(entry->refresh_time)) 316 if (!(entry->refresh_time))
302 entry->refresh_time = (2*(entry->ctrl_info.holding_time))/3; 317 entry->refresh_time = (2 * (entry->ctrl_info.holding_time))/3;
303 if( (now.tv_sec - entry->reply_wait.tv_sec) > entry->refresh_time ){ 318 if ((now.tv_sec - entry->reply_wait.tv_sec) >
304 dprintk("mpoa: mpoa_caches.c: refreshing an entry.\n"); 319 entry->refresh_time) {
320 dprintk("refreshing an entry.\n");
305 entry->entry_state = INGRESS_REFRESHING; 321 entry->entry_state = INGRESS_REFRESHING;
306 322
307 } 323 }
@@ -314,21 +330,22 @@ static void refresh_entries(struct mpoa_client *client)
314static void in_destroy_cache(struct mpoa_client *mpc) 330static void in_destroy_cache(struct mpoa_client *mpc)
315{ 331{
316 write_lock_irq(&mpc->ingress_lock); 332 write_lock_irq(&mpc->ingress_lock);
317 while(mpc->in_cache != NULL) 333 while (mpc->in_cache != NULL)
318 mpc->in_ops->remove_entry(mpc->in_cache, mpc); 334 mpc->in_ops->remove_entry(mpc->in_cache, mpc);
319 write_unlock_irq(&mpc->ingress_lock); 335 write_unlock_irq(&mpc->ingress_lock);
320 336
321 return; 337 return;
322} 338}
323 339
324static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id, struct mpoa_client *mpc) 340static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id,
341 struct mpoa_client *mpc)
325{ 342{
326 eg_cache_entry *entry; 343 eg_cache_entry *entry;
327 344
328 read_lock_irq(&mpc->egress_lock); 345 read_lock_irq(&mpc->egress_lock);
329 entry = mpc->eg_cache; 346 entry = mpc->eg_cache;
330 while(entry != NULL){ 347 while (entry != NULL) {
331 if(entry->ctrl_info.cache_id == cache_id){ 348 if (entry->ctrl_info.cache_id == cache_id) {
332 atomic_inc(&entry->use); 349 atomic_inc(&entry->use);
333 read_unlock_irq(&mpc->egress_lock); 350 read_unlock_irq(&mpc->egress_lock);
334 return entry; 351 return entry;
@@ -348,7 +365,7 @@ static eg_cache_entry *eg_cache_get_by_tag(__be32 tag, struct mpoa_client *mpc)
348 365
349 read_lock_irqsave(&mpc->egress_lock, flags); 366 read_lock_irqsave(&mpc->egress_lock, flags);
350 entry = mpc->eg_cache; 367 entry = mpc->eg_cache;
351 while (entry != NULL){ 368 while (entry != NULL) {
352 if (entry->ctrl_info.tag == tag) { 369 if (entry->ctrl_info.tag == tag) {
353 atomic_inc(&entry->use); 370 atomic_inc(&entry->use);
354 read_unlock_irqrestore(&mpc->egress_lock, flags); 371 read_unlock_irqrestore(&mpc->egress_lock, flags);
@@ -362,14 +379,15 @@ static eg_cache_entry *eg_cache_get_by_tag(__be32 tag, struct mpoa_client *mpc)
362} 379}
363 380
364/* This can be called from any context since it saves CPU flags */ 381/* This can be called from any context since it saves CPU flags */
365static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc, struct mpoa_client *mpc) 382static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc,
383 struct mpoa_client *mpc)
366{ 384{
367 unsigned long flags; 385 unsigned long flags;
368 eg_cache_entry *entry; 386 eg_cache_entry *entry;
369 387
370 read_lock_irqsave(&mpc->egress_lock, flags); 388 read_lock_irqsave(&mpc->egress_lock, flags);
371 entry = mpc->eg_cache; 389 entry = mpc->eg_cache;
372 while (entry != NULL){ 390 while (entry != NULL) {
373 if (entry->shortcut == vcc) { 391 if (entry->shortcut == vcc) {
374 atomic_inc(&entry->use); 392 atomic_inc(&entry->use);
375 read_unlock_irqrestore(&mpc->egress_lock, flags); 393 read_unlock_irqrestore(&mpc->egress_lock, flags);
@@ -382,14 +400,15 @@ static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc, struct mpoa_clie
382 return NULL; 400 return NULL;
383} 401}
384 402
385static eg_cache_entry *eg_cache_get_by_src_ip(__be32 ipaddr, struct mpoa_client *mpc) 403static eg_cache_entry *eg_cache_get_by_src_ip(__be32 ipaddr,
404 struct mpoa_client *mpc)
386{ 405{
387 eg_cache_entry *entry; 406 eg_cache_entry *entry;
388 407
389 read_lock_irq(&mpc->egress_lock); 408 read_lock_irq(&mpc->egress_lock);
390 entry = mpc->eg_cache; 409 entry = mpc->eg_cache;
391 while(entry != NULL){ 410 while (entry != NULL) {
392 if(entry->latest_ip_addr == ipaddr) { 411 if (entry->latest_ip_addr == ipaddr) {
393 atomic_inc(&entry->use); 412 atomic_inc(&entry->use);
394 read_unlock_irq(&mpc->egress_lock); 413 read_unlock_irq(&mpc->egress_lock);
395 return entry; 414 return entry;
@@ -421,7 +440,7 @@ static void eg_cache_remove_entry(eg_cache_entry *entry,
421 struct k_message msg; 440 struct k_message msg;
422 441
423 vcc = entry->shortcut; 442 vcc = entry->shortcut;
424 dprintk("mpoa: mpoa_caches.c: removing an egress entry.\n"); 443 dprintk("removing an egress entry.\n");
425 if (entry->prev != NULL) 444 if (entry->prev != NULL)
426 entry->prev->next = entry->next; 445 entry->prev->next = entry->next;
427 else 446 else
@@ -429,9 +448,9 @@ static void eg_cache_remove_entry(eg_cache_entry *entry,
429 if (entry->next != NULL) 448 if (entry->next != NULL)
430 entry->next->prev = entry->prev; 449 entry->next->prev = entry->prev;
431 client->eg_ops->put(entry); 450 client->eg_ops->put(entry);
432 if(client->in_cache == NULL && client->eg_cache == NULL){ 451 if (client->in_cache == NULL && client->eg_cache == NULL) {
433 msg.type = STOP_KEEP_ALIVE_SM; 452 msg.type = STOP_KEEP_ALIVE_SM;
434 msg_to_mpoad(&msg,client); 453 msg_to_mpoad(&msg, client);
435 } 454 }
436 455
437 /* Check if the ingress side still uses this VCC */ 456 /* Check if the ingress side still uses this VCC */
@@ -447,20 +466,21 @@ static void eg_cache_remove_entry(eg_cache_entry *entry,
447 return; 466 return;
448} 467}
449 468
450static eg_cache_entry *eg_cache_add_entry(struct k_message *msg, struct mpoa_client *client) 469static eg_cache_entry *eg_cache_add_entry(struct k_message *msg,
470 struct mpoa_client *client)
451{ 471{
452 eg_cache_entry *entry = kzalloc(sizeof(eg_cache_entry), GFP_KERNEL); 472 eg_cache_entry *entry = kzalloc(sizeof(eg_cache_entry), GFP_KERNEL);
453 473
454 if (entry == NULL) { 474 if (entry == NULL) {
455 printk("mpoa: mpoa_caches.c: new_eg_cache_entry: out of memory\n"); 475 pr_info("out of memory\n");
456 return NULL; 476 return NULL;
457 } 477 }
458 478
459 dprintk("mpoa: mpoa_caches.c: adding an egress entry, ip = %pI4, this should be our IP\n", 479 dprintk("adding an egress entry, ip = %pI4, this should be our IP\n",
460 &msg->content.eg_info.eg_dst_ip); 480 &msg->content.eg_info.eg_dst_ip);
461 481
462 atomic_set(&entry->use, 1); 482 atomic_set(&entry->use, 1);
463 dprintk("mpoa: mpoa_caches.c: new_eg_cache_entry: about to lock\n"); 483 dprintk("new_eg_cache_entry: about to lock\n");
464 write_lock_irq(&client->egress_lock); 484 write_lock_irq(&client->egress_lock);
465 entry->next = client->eg_cache; 485 entry->next = client->eg_cache;
466 entry->prev = NULL; 486 entry->prev = NULL;
@@ -472,18 +492,18 @@ static eg_cache_entry *eg_cache_add_entry(struct k_message *msg, struct mpoa_cli
472 entry->ctrl_info = msg->content.eg_info; 492 entry->ctrl_info = msg->content.eg_info;
473 do_gettimeofday(&(entry->tv)); 493 do_gettimeofday(&(entry->tv));
474 entry->entry_state = EGRESS_RESOLVED; 494 entry->entry_state = EGRESS_RESOLVED;
475 dprintk("mpoa: mpoa_caches.c: new_eg_cache_entry cache_id %lu\n", ntohl(entry->ctrl_info.cache_id)); 495 dprintk("new_eg_cache_entry cache_id %u\n",
476 dprintk("mpoa: mpoa_caches.c: mps_ip = %pI4\n", 496 ntohl(entry->ctrl_info.cache_id));
477 &entry->ctrl_info.mps_ip); 497 dprintk("mps_ip = %pI4\n", &entry->ctrl_info.mps_ip);
478 atomic_inc(&entry->use); 498 atomic_inc(&entry->use);
479 499
480 write_unlock_irq(&client->egress_lock); 500 write_unlock_irq(&client->egress_lock);
481 dprintk("mpoa: mpoa_caches.c: new_eg_cache_entry: unlocked\n"); 501 dprintk("new_eg_cache_entry: unlocked\n");
482 502
483 return entry; 503 return entry;
484} 504}
485 505
486static void update_eg_cache_entry(eg_cache_entry * entry, uint16_t holding_time) 506static void update_eg_cache_entry(eg_cache_entry *entry, uint16_t holding_time)
487{ 507{
488 do_gettimeofday(&(entry->tv)); 508 do_gettimeofday(&(entry->tv));
489 entry->entry_state = EGRESS_RESOLVED; 509 entry->entry_state = EGRESS_RESOLVED;
@@ -502,13 +522,14 @@ static void clear_expired(struct mpoa_client *client)
502 522
503 write_lock_irq(&client->egress_lock); 523 write_lock_irq(&client->egress_lock);
504 entry = client->eg_cache; 524 entry = client->eg_cache;
505 while(entry != NULL){ 525 while (entry != NULL) {
506 next_entry = entry->next; 526 next_entry = entry->next;
507 if((now.tv_sec - entry->tv.tv_sec) 527 if ((now.tv_sec - entry->tv.tv_sec)
508 > entry->ctrl_info.holding_time){ 528 > entry->ctrl_info.holding_time) {
509 msg.type = SND_EGRESS_PURGE; 529 msg.type = SND_EGRESS_PURGE;
510 msg.content.eg_info = entry->ctrl_info; 530 msg.content.eg_info = entry->ctrl_info;
511 dprintk("mpoa: mpoa_caches.c: egress_cache: holding time expired, cache_id = %lu.\n",ntohl(entry->ctrl_info.cache_id)); 531 dprintk("egress_cache: holding time expired, cache_id = %u.\n",
532 ntohl(entry->ctrl_info.cache_id));
512 msg_to_mpoad(&msg, client); 533 msg_to_mpoad(&msg, client);
513 client->eg_ops->remove_entry(entry, client); 534 client->eg_ops->remove_entry(entry, client);
514 } 535 }
@@ -522,7 +543,7 @@ static void clear_expired(struct mpoa_client *client)
522static void eg_destroy_cache(struct mpoa_client *mpc) 543static void eg_destroy_cache(struct mpoa_client *mpc)
523{ 544{
524 write_lock_irq(&mpc->egress_lock); 545 write_lock_irq(&mpc->egress_lock);
525 while(mpc->eg_cache != NULL) 546 while (mpc->eg_cache != NULL)
526 mpc->eg_ops->remove_entry(mpc->eg_cache, mpc); 547 mpc->eg_ops->remove_entry(mpc->eg_cache, mpc);
527 write_unlock_irq(&mpc->egress_lock); 548 write_unlock_irq(&mpc->egress_lock);
528 549
@@ -530,7 +551,6 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
530} 551}
531 552
532 553
533
534static struct in_cache_ops ingress_ops = { 554static struct in_cache_ops ingress_ops = {
535 in_cache_add_entry, /* add_entry */ 555 in_cache_add_entry, /* add_entry */
536 in_cache_get, /* get */ 556 in_cache_get, /* get */
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index 1a0f5ccea9c4..b9bdb98427e4 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -1,3 +1,4 @@
1#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
1 2
2#ifdef CONFIG_PROC_FS 3#ifdef CONFIG_PROC_FS
3#include <linux/errno.h> 4#include <linux/errno.h>
@@ -8,7 +9,7 @@
8#include <linux/proc_fs.h> 9#include <linux/proc_fs.h>
9#include <linux/time.h> 10#include <linux/time.h>
10#include <linux/seq_file.h> 11#include <linux/seq_file.h>
11#include <asm/uaccess.h> 12#include <linux/uaccess.h>
12#include <linux/atmmpc.h> 13#include <linux/atmmpc.h>
13#include <linux/atm.h> 14#include <linux/atm.h>
14#include "mpc.h" 15#include "mpc.h"
@@ -20,9 +21,23 @@
20 */ 21 */
21 22
22#if 1 23#if 1
23#define dprintk printk /* debug */ 24#define dprintk(format, args...) \
25 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
24#else 26#else
25#define dprintk(format,args...) 27#define dprintk(format, args...) \
28 do { if (0) \
29 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
30 } while (0)
31#endif
32
33#if 0
34#define ddprintk(format, args...) \
35 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
36#else
37#define ddprintk(format, args...) \
38 do { if (0) \
39 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
40 } while (0)
26#endif 41#endif
27 42
28#define STAT_FILE_NAME "mpc" /* Our statistic file's name */ 43#define STAT_FILE_NAME "mpc" /* Our statistic file's name */
@@ -51,42 +66,37 @@ static const struct file_operations mpc_file_operations = {
51/* 66/*
52 * Returns the state of an ingress cache entry as a string 67 * Returns the state of an ingress cache entry as a string
53 */ 68 */
54static const char *ingress_state_string(int state){ 69static const char *ingress_state_string(int state)
55 switch(state) { 70{
71 switch (state) {
56 case INGRESS_RESOLVING: 72 case INGRESS_RESOLVING:
57 return "resolving "; 73 return "resolving ";
58 break;
59 case INGRESS_RESOLVED: 74 case INGRESS_RESOLVED:
60 return "resolved "; 75 return "resolved ";
61 break;
62 case INGRESS_INVALID: 76 case INGRESS_INVALID:
63 return "invalid "; 77 return "invalid ";
64 break;
65 case INGRESS_REFRESHING: 78 case INGRESS_REFRESHING:
66 return "refreshing "; 79 return "refreshing ";
67 break;
68 default:
69 return "";
70 } 80 }
81
82 return "";
71} 83}
72 84
73/* 85/*
74 * Returns the state of an egress cache entry as a string 86 * Returns the state of an egress cache entry as a string
75 */ 87 */
76static const char *egress_state_string(int state){ 88static const char *egress_state_string(int state)
77 switch(state) { 89{
90 switch (state) {
78 case EGRESS_RESOLVED: 91 case EGRESS_RESOLVED:
79 return "resolved "; 92 return "resolved ";
80 break;
81 case EGRESS_PURGE: 93 case EGRESS_PURGE:
82 return "purge "; 94 return "purge ";
83 break;
84 case EGRESS_INVALID: 95 case EGRESS_INVALID:
85 return "invalid "; 96 return "invalid ";
86 break;
87 default:
88 return "";
89 } 97 }
98
99 return "";
90} 100}
91 101
92/* 102/*
@@ -123,7 +133,6 @@ static void mpc_stop(struct seq_file *m, void *v)
123static int mpc_show(struct seq_file *m, void *v) 133static int mpc_show(struct seq_file *m, void *v)
124{ 134{
125 struct mpoa_client *mpc = v; 135 struct mpoa_client *mpc = v;
126 unsigned char *temp;
127 int i; 136 int i;
128 in_cache_entry *in_entry; 137 in_cache_entry *in_entry;
129 eg_cache_entry *eg_entry; 138 eg_cache_entry *eg_entry;
@@ -140,15 +149,17 @@ static int mpc_show(struct seq_file *m, void *v)
140 do_gettimeofday(&now); 149 do_gettimeofday(&now);
141 150
142 for (in_entry = mpc->in_cache; in_entry; in_entry = in_entry->next) { 151 for (in_entry = mpc->in_cache; in_entry; in_entry = in_entry->next) {
143 temp = (unsigned char *)&in_entry->ctrl_info.in_dst_ip; 152 sprintf(ip_string, "%pI4", &in_entry->ctrl_info.in_dst_ip);
144 sprintf(ip_string,"%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
145 seq_printf(m, "%-16s%s%-14lu%-12u", 153 seq_printf(m, "%-16s%s%-14lu%-12u",
146 ip_string, 154 ip_string,
147 ingress_state_string(in_entry->entry_state), 155 ingress_state_string(in_entry->entry_state),
148 in_entry->ctrl_info.holding_time-(now.tv_sec-in_entry->tv.tv_sec), 156 in_entry->ctrl_info.holding_time -
149 in_entry->packets_fwded); 157 (now.tv_sec-in_entry->tv.tv_sec),
158 in_entry->packets_fwded);
150 if (in_entry->shortcut) 159 if (in_entry->shortcut)
151 seq_printf(m, " %-3d %-3d",in_entry->shortcut->vpi,in_entry->shortcut->vci); 160 seq_printf(m, " %-3d %-3d",
161 in_entry->shortcut->vpi,
162 in_entry->shortcut->vci);
152 seq_printf(m, "\n"); 163 seq_printf(m, "\n");
153 } 164 }
154 165
@@ -156,21 +167,23 @@ static int mpc_show(struct seq_file *m, void *v)
156 seq_printf(m, "Egress Entries:\nIngress MPC ATM addr\nCache-id State Holding time Packets recvd Latest IP addr VPI VCI\n"); 167 seq_printf(m, "Egress Entries:\nIngress MPC ATM addr\nCache-id State Holding time Packets recvd Latest IP addr VPI VCI\n");
157 for (eg_entry = mpc->eg_cache; eg_entry; eg_entry = eg_entry->next) { 168 for (eg_entry = mpc->eg_cache; eg_entry; eg_entry = eg_entry->next) {
158 unsigned char *p = eg_entry->ctrl_info.in_MPC_data_ATM_addr; 169 unsigned char *p = eg_entry->ctrl_info.in_MPC_data_ATM_addr;
159 for(i = 0; i < ATM_ESA_LEN; i++) 170 for (i = 0; i < ATM_ESA_LEN; i++)
160 seq_printf(m, "%02x", p[i]); 171 seq_printf(m, "%02x", p[i]);
161 seq_printf(m, "\n%-16lu%s%-14lu%-15u", 172 seq_printf(m, "\n%-16lu%s%-14lu%-15u",
162 (unsigned long)ntohl(eg_entry->ctrl_info.cache_id), 173 (unsigned long)ntohl(eg_entry->ctrl_info.cache_id),
163 egress_state_string(eg_entry->entry_state), 174 egress_state_string(eg_entry->entry_state),
164 (eg_entry->ctrl_info.holding_time-(now.tv_sec-eg_entry->tv.tv_sec)), 175 (eg_entry->ctrl_info.holding_time -
176 (now.tv_sec-eg_entry->tv.tv_sec)),
165 eg_entry->packets_rcvd); 177 eg_entry->packets_rcvd);
166 178
167 /* latest IP address */ 179 /* latest IP address */
168 temp = (unsigned char *)&eg_entry->latest_ip_addr; 180 sprintf(ip_string, "%pI4", &eg_entry->latest_ip_addr);
169 sprintf(ip_string, "%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
170 seq_printf(m, "%-16s", ip_string); 181 seq_printf(m, "%-16s", ip_string);
171 182
172 if (eg_entry->shortcut) 183 if (eg_entry->shortcut)
173 seq_printf(m, " %-3d %-3d",eg_entry->shortcut->vpi,eg_entry->shortcut->vci); 184 seq_printf(m, " %-3d %-3d",
185 eg_entry->shortcut->vpi,
186 eg_entry->shortcut->vci);
174 seq_printf(m, "\n"); 187 seq_printf(m, "\n");
175 } 188 }
176 seq_printf(m, "\n"); 189 seq_printf(m, "\n");
@@ -258,12 +271,9 @@ static int parse_qos(const char *buff)
258 qos.rxtp.max_pcr = rx_pcr; 271 qos.rxtp.max_pcr = rx_pcr;
259 qos.rxtp.max_sdu = rx_sdu; 272 qos.rxtp.max_sdu = rx_sdu;
260 qos.aal = ATM_AAL5; 273 qos.aal = ATM_AAL5;
261 dprintk("mpoa: mpoa_proc.c: parse_qos(): setting qos paramameters to tx=%d,%d rx=%d,%d\n", 274 dprintk("parse_qos(): setting qos paramameters to tx=%d,%d rx=%d,%d\n",
262 qos.txtp.max_pcr, 275 qos.txtp.max_pcr, qos.txtp.max_sdu,
263 qos.txtp.max_sdu, 276 qos.rxtp.max_pcr, qos.rxtp.max_sdu);
264 qos.rxtp.max_pcr,
265 qos.rxtp.max_sdu
266 );
267 277
268 atm_mpoa_add_qos(ipaddr, &qos); 278 atm_mpoa_add_qos(ipaddr, &qos);
269 return 1; 279 return 1;
@@ -278,7 +288,7 @@ int mpc_proc_init(void)
278 288
279 p = proc_create(STAT_FILE_NAME, 0, atm_proc_root, &mpc_file_operations); 289 p = proc_create(STAT_FILE_NAME, 0, atm_proc_root, &mpc_file_operations);
280 if (!p) { 290 if (!p) {
281 printk(KERN_ERR "Unable to initialize /proc/atm/%s\n", STAT_FILE_NAME); 291 pr_err("Unable to initialize /proc/atm/%s\n", STAT_FILE_NAME);
282 return -ENOMEM; 292 return -ENOMEM;
283 } 293 }
284 return 0; 294 return 0;
@@ -289,10 +299,9 @@ int mpc_proc_init(void)
289 */ 299 */
290void mpc_proc_clean(void) 300void mpc_proc_clean(void)
291{ 301{
292 remove_proc_entry(STAT_FILE_NAME,atm_proc_root); 302 remove_proc_entry(STAT_FILE_NAME, atm_proc_root);
293} 303}
294 304
295
296#endif /* CONFIG_PROC_FS */ 305#endif /* CONFIG_PROC_FS */
297 306
298 307
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index 0af84cd4f65b..400839273c67 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -33,6 +33,8 @@
33 * These hooks are not yet available in ppp_generic 33 * These hooks are not yet available in ppp_generic
34 */ 34 */
35 35
36#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
37
36#include <linux/module.h> 38#include <linux/module.h>
37#include <linux/init.h> 39#include <linux/init.h>
38#include <linux/skbuff.h> 40#include <linux/skbuff.h>
@@ -132,7 +134,7 @@ static void pppoatm_unassign_vcc(struct atm_vcc *atmvcc)
132static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb) 134static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
133{ 135{
134 struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc); 136 struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
135 pr_debug("pppoatm push\n"); 137 pr_debug("\n");
136 if (skb == NULL) { /* VCC was closed */ 138 if (skb == NULL) { /* VCC was closed */
137 pr_debug("removing ATMPPP VCC %p\n", pvcc); 139 pr_debug("removing ATMPPP VCC %p\n", pvcc);
138 pppoatm_unassign_vcc(atmvcc); 140 pppoatm_unassign_vcc(atmvcc);
@@ -165,17 +167,17 @@ static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
165 pvcc->chan.mtu += LLC_LEN; 167 pvcc->chan.mtu += LLC_LEN;
166 break; 168 break;
167 } 169 }
168 pr_debug("Couldn't autodetect yet " 170 pr_debug("Couldn't autodetect yet (skb: %02X %02X %02X %02X %02X %02X)\n",
169 "(skb: %02X %02X %02X %02X %02X %02X)\n", 171 skb->data[0], skb->data[1], skb->data[2],
170 skb->data[0], skb->data[1], skb->data[2], 172 skb->data[3], skb->data[4], skb->data[5]);
171 skb->data[3], skb->data[4], skb->data[5]);
172 goto error; 173 goto error;
173 case e_vc: 174 case e_vc:
174 break; 175 break;
175 } 176 }
176 ppp_input(&pvcc->chan, skb); 177 ppp_input(&pvcc->chan, skb);
177 return; 178 return;
178 error: 179
180error:
179 kfree_skb(skb); 181 kfree_skb(skb);
180 ppp_input_error(&pvcc->chan, 0); 182 ppp_input_error(&pvcc->chan, 0);
181} 183}
@@ -194,7 +196,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
194{ 196{
195 struct pppoatm_vcc *pvcc = chan_to_pvcc(chan); 197 struct pppoatm_vcc *pvcc = chan_to_pvcc(chan);
196 ATM_SKB(skb)->vcc = pvcc->atmvcc; 198 ATM_SKB(skb)->vcc = pvcc->atmvcc;
197 pr_debug("pppoatm_send (skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc); 199 pr_debug("(skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc);
198 if (skb->data[0] == '\0' && (pvcc->flags & SC_COMP_PROT)) 200 if (skb->data[0] == '\0' && (pvcc->flags & SC_COMP_PROT))
199 (void) skb_pull(skb, 1); 201 (void) skb_pull(skb, 1);
200 switch (pvcc->encaps) { /* LLC encapsulation needed */ 202 switch (pvcc->encaps) { /* LLC encapsulation needed */
@@ -208,7 +210,8 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
208 goto nospace; 210 goto nospace;
209 } 211 }
210 kfree_skb(skb); 212 kfree_skb(skb);
211 if ((skb = n) == NULL) 213 skb = n;
214 if (skb == NULL)
212 return DROP_PACKET; 215 return DROP_PACKET;
213 } else if (!atm_may_send(pvcc->atmvcc, skb->truesize)) 216 } else if (!atm_may_send(pvcc->atmvcc, skb->truesize))
214 goto nospace; 217 goto nospace;
@@ -226,11 +229,11 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
226 229
227 atomic_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc); 230 atomic_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
228 ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options; 231 ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
229 pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, ATM_SKB(skb)->vcc, 232 pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
230 ATM_SKB(skb)->vcc->dev); 233 skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
231 return ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb) 234 return ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
232 ? DROP_PACKET : 1; 235 ? DROP_PACKET : 1;
233 nospace: 236nospace:
234 /* 237 /*
235 * We don't have space to send this SKB now, but we might have 238 * We don't have space to send this SKB now, but we might have
236 * already applied SC_COMP_PROT compression, so may need to undo 239 * already applied SC_COMP_PROT compression, so may need to undo
@@ -289,7 +292,8 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
289 (be.encaps == e_vc ? 0 : LLC_LEN); 292 (be.encaps == e_vc ? 0 : LLC_LEN);
290 pvcc->wakeup_tasklet = tasklet_proto; 293 pvcc->wakeup_tasklet = tasklet_proto;
291 pvcc->wakeup_tasklet.data = (unsigned long) &pvcc->chan; 294 pvcc->wakeup_tasklet.data = (unsigned long) &pvcc->chan;
292 if ((err = ppp_register_channel(&pvcc->chan)) != 0) { 295 err = ppp_register_channel(&pvcc->chan);
296 if (err != 0) {
293 kfree(pvcc); 297 kfree(pvcc);
294 return err; 298 return err;
295 } 299 }
diff --git a/net/atm/proc.c b/net/atm/proc.c
index ab8419a324b6..7a96b2376bd7 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -24,15 +24,15 @@
24#include <linux/init.h> /* for __init */ 24#include <linux/init.h> /* for __init */
25#include <net/net_namespace.h> 25#include <net/net_namespace.h>
26#include <net/atmclip.h> 26#include <net/atmclip.h>
27#include <asm/uaccess.h> 27#include <linux/uaccess.h>
28#include <linux/param.h> /* for HZ */
28#include <asm/atomic.h> 29#include <asm/atomic.h>
29#include <asm/param.h> /* for HZ */
30#include "resources.h" 30#include "resources.h"
31#include "common.h" /* atm_proc_init prototype */ 31#include "common.h" /* atm_proc_init prototype */
32#include "signaling.h" /* to get sigd - ugly too */ 32#include "signaling.h" /* to get sigd - ugly too */
33 33
34static ssize_t proc_dev_atm_read(struct file *file,char __user *buf,size_t count, 34static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
35 loff_t *pos); 35 size_t count, loff_t *pos);
36 36
37static const struct file_operations proc_atm_dev_ops = { 37static const struct file_operations proc_atm_dev_ops = {
38 .owner = THIS_MODULE, 38 .owner = THIS_MODULE,
@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
43 const struct k_atm_aal_stats *stats) 43 const struct k_atm_aal_stats *stats)
44{ 44{
45 seq_printf(seq, "%s ( %d %d %d %d %d )", aal, 45 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
46 atomic_read(&stats->tx),atomic_read(&stats->tx_err), 46 atomic_read(&stats->tx), atomic_read(&stats->tx_err),
47 atomic_read(&stats->rx),atomic_read(&stats->rx_err), 47 atomic_read(&stats->rx), atomic_read(&stats->rx_err),
48 atomic_read(&stats->rx_drop)); 48 atomic_read(&stats->rx_drop));
49} 49}
50 50
51static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev) 51static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
@@ -151,8 +151,8 @@ static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
151 151
152static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc) 152static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc)
153{ 153{
154 static const char *const class_name[] = 154 static const char *const class_name[] = {
155 {"off","UBR","CBR","VBR","ABR"}; 155 "off", "UBR", "CBR", "VBR", "ABR"};
156 static const char *const aal_name[] = { 156 static const char *const aal_name[] = {
157 "---", "1", "2", "3/4", /* 0- 3 */ 157 "---", "1", "2", "3/4", /* 0- 3 */
158 "???", "5", "???", "???", /* 4- 7 */ 158 "???", "5", "???", "???", /* 4- 7 */
@@ -160,11 +160,12 @@ static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc)
160 "???", "0", "???", "???"}; /* 12-15 */ 160 "???", "0", "???", "???"}; /* 12-15 */
161 161
162 seq_printf(seq, "%3d %3d %5d %-3s %7d %-5s %7d %-6s", 162 seq_printf(seq, "%3d %3d %5d %-3s %7d %-5s %7d %-6s",
163 vcc->dev->number,vcc->vpi,vcc->vci, 163 vcc->dev->number, vcc->vpi, vcc->vci,
164 vcc->qos.aal >= ARRAY_SIZE(aal_name) ? "err" : 164 vcc->qos.aal >= ARRAY_SIZE(aal_name) ? "err" :
165 aal_name[vcc->qos.aal],vcc->qos.rxtp.min_pcr, 165 aal_name[vcc->qos.aal], vcc->qos.rxtp.min_pcr,
166 class_name[vcc->qos.rxtp.traffic_class],vcc->qos.txtp.min_pcr, 166 class_name[vcc->qos.rxtp.traffic_class],
167 class_name[vcc->qos.txtp.traffic_class]); 167 vcc->qos.txtp.min_pcr,
168 class_name[vcc->qos.txtp.traffic_class]);
168 if (test_bit(ATM_VF_IS_CLIP, &vcc->flags)) { 169 if (test_bit(ATM_VF_IS_CLIP, &vcc->flags)) {
169 struct clip_vcc *clip_vcc = CLIP_VCC(vcc); 170 struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
170 struct net_device *dev; 171 struct net_device *dev;
@@ -195,19 +196,20 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
195 seq_printf(seq, "%3d %3d %5d ", vcc->dev->number, vcc->vpi, 196 seq_printf(seq, "%3d %3d %5d ", vcc->dev->number, vcc->vpi,
196 vcc->vci); 197 vcc->vci);
197 switch (sk->sk_family) { 198 switch (sk->sk_family) {
198 case AF_ATMPVC: 199 case AF_ATMPVC:
199 seq_printf(seq, "PVC"); 200 seq_printf(seq, "PVC");
200 break; 201 break;
201 case AF_ATMSVC: 202 case AF_ATMSVC:
202 seq_printf(seq, "SVC"); 203 seq_printf(seq, "SVC");
203 break; 204 break;
204 default: 205 default:
205 seq_printf(seq, "%3d", sk->sk_family); 206 seq_printf(seq, "%3d", sk->sk_family);
206 } 207 }
207 seq_printf(seq, " %04lx %5d %7d/%7d %7d/%7d [%d]\n", vcc->flags, sk->sk_err, 208 seq_printf(seq, " %04lx %5d %7d/%7d %7d/%7d [%d]\n",
208 sk_wmem_alloc_get(sk), sk->sk_sndbuf, 209 vcc->flags, sk->sk_err,
209 sk_rmem_alloc_get(sk), sk->sk_rcvbuf, 210 sk_wmem_alloc_get(sk), sk->sk_sndbuf,
210 atomic_read(&sk->sk_refcnt)); 211 sk_rmem_alloc_get(sk), sk->sk_rcvbuf,
212 atomic_read(&sk->sk_refcnt));
211} 213}
212 214
213static void svc_info(struct seq_file *seq, struct atm_vcc *vcc) 215static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
@@ -236,7 +238,7 @@ static int atm_dev_seq_show(struct seq_file *seq, void *v)
236 "Itf Type ESI/\"MAC\"addr " 238 "Itf Type ESI/\"MAC\"addr "
237 "AAL(TX,err,RX,err,drop) ... [refcnt]\n"; 239 "AAL(TX,err,RX,err,drop) ... [refcnt]\n";
238 240
239 if (v == SEQ_START_TOKEN) 241 if (v == &atm_devs)
240 seq_puts(seq, atm_dev_banner); 242 seq_puts(seq, atm_dev_banner);
241 else { 243 else {
242 struct atm_dev *dev = list_entry(v, struct atm_dev, dev_list); 244 struct atm_dev *dev = list_entry(v, struct atm_dev, dev_list);
@@ -376,32 +378,35 @@ static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
376 unsigned long page; 378 unsigned long page;
377 int length; 379 int length;
378 380
379 if (count == 0) return 0; 381 if (count == 0)
382 return 0;
380 page = get_zeroed_page(GFP_KERNEL); 383 page = get_zeroed_page(GFP_KERNEL);
381 if (!page) return -ENOMEM; 384 if (!page)
385 return -ENOMEM;
382 dev = PDE(file->f_path.dentry->d_inode)->data; 386 dev = PDE(file->f_path.dentry->d_inode)->data;
383 if (!dev->ops->proc_read) 387 if (!dev->ops->proc_read)
384 length = -EINVAL; 388 length = -EINVAL;
385 else { 389 else {
386 length = dev->ops->proc_read(dev,pos,(char *) page); 390 length = dev->ops->proc_read(dev, pos, (char *)page);
387 if (length > count) length = -EINVAL; 391 if (length > count)
392 length = -EINVAL;
388 } 393 }
389 if (length >= 0) { 394 if (length >= 0) {
390 if (copy_to_user(buf,(char *) page,length)) length = -EFAULT; 395 if (copy_to_user(buf, (char *)page, length))
396 length = -EFAULT;
391 (*pos)++; 397 (*pos)++;
392 } 398 }
393 free_page(page); 399 free_page(page);
394 return length; 400 return length;
395} 401}
396 402
397
398struct proc_dir_entry *atm_proc_root; 403struct proc_dir_entry *atm_proc_root;
399EXPORT_SYMBOL(atm_proc_root); 404EXPORT_SYMBOL(atm_proc_root);
400 405
401 406
402int atm_proc_dev_register(struct atm_dev *dev) 407int atm_proc_dev_register(struct atm_dev *dev)
403{ 408{
404 int digits,num; 409 int digits, num;
405 int error; 410 int error;
406 411
407 /* No proc info */ 412 /* No proc info */
@@ -410,26 +415,28 @@ int atm_proc_dev_register(struct atm_dev *dev)
410 415
411 error = -ENOMEM; 416 error = -ENOMEM;
412 digits = 0; 417 digits = 0;
413 for (num = dev->number; num; num /= 10) digits++; 418 for (num = dev->number; num; num /= 10)
414 if (!digits) digits++; 419 digits++;
420 if (!digits)
421 digits++;
415 422
416 dev->proc_name = kmalloc(strlen(dev->type) + digits + 2, GFP_KERNEL); 423 dev->proc_name = kmalloc(strlen(dev->type) + digits + 2, GFP_KERNEL);
417 if (!dev->proc_name) 424 if (!dev->proc_name)
418 goto err_out; 425 goto err_out;
419 sprintf(dev->proc_name,"%s:%d",dev->type, dev->number); 426 sprintf(dev->proc_name, "%s:%d", dev->type, dev->number);
420 427
421 dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root, 428 dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root,
422 &proc_atm_dev_ops, dev); 429 &proc_atm_dev_ops, dev);
423 if (!dev->proc_entry) 430 if (!dev->proc_entry)
424 goto err_free_name; 431 goto err_free_name;
425 return 0; 432 return 0;
433
426err_free_name: 434err_free_name:
427 kfree(dev->proc_name); 435 kfree(dev->proc_name);
428err_out: 436err_out:
429 return error; 437 return error;
430} 438}
431 439
432
433void atm_proc_dev_deregister(struct atm_dev *dev) 440void atm_proc_dev_deregister(struct atm_dev *dev)
434{ 441{
435 if (!dev->ops->proc_read) 442 if (!dev->ops->proc_read)
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index 8d74e62b0d79..437ee70c5e62 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -17,32 +17,35 @@
17#include "common.h" /* common for PVCs and SVCs */ 17#include "common.h" /* common for PVCs and SVCs */
18 18
19 19
20static int pvc_shutdown(struct socket *sock,int how) 20static int pvc_shutdown(struct socket *sock, int how)
21{ 21{
22 return 0; 22 return 0;
23} 23}
24 24
25 25static int pvc_bind(struct socket *sock, struct sockaddr *sockaddr,
26static int pvc_bind(struct socket *sock,struct sockaddr *sockaddr, 26 int sockaddr_len)
27 int sockaddr_len)
28{ 27{
29 struct sock *sk = sock->sk; 28 struct sock *sk = sock->sk;
30 struct sockaddr_atmpvc *addr; 29 struct sockaddr_atmpvc *addr;
31 struct atm_vcc *vcc; 30 struct atm_vcc *vcc;
32 int error; 31 int error;
33 32
34 if (sockaddr_len != sizeof(struct sockaddr_atmpvc)) return -EINVAL; 33 if (sockaddr_len != sizeof(struct sockaddr_atmpvc))
35 addr = (struct sockaddr_atmpvc *) sockaddr; 34 return -EINVAL;
36 if (addr->sap_family != AF_ATMPVC) return -EAFNOSUPPORT; 35 addr = (struct sockaddr_atmpvc *)sockaddr;
36 if (addr->sap_family != AF_ATMPVC)
37 return -EAFNOSUPPORT;
37 lock_sock(sk); 38 lock_sock(sk);
38 vcc = ATM_SD(sock); 39 vcc = ATM_SD(sock);
39 if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) { 40 if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) {
40 error = -EBADFD; 41 error = -EBADFD;
41 goto out; 42 goto out;
42 } 43 }
43 if (test_bit(ATM_VF_PARTIAL,&vcc->flags)) { 44 if (test_bit(ATM_VF_PARTIAL, &vcc->flags)) {
44 if (vcc->vpi != ATM_VPI_UNSPEC) addr->sap_addr.vpi = vcc->vpi; 45 if (vcc->vpi != ATM_VPI_UNSPEC)
45 if (vcc->vci != ATM_VCI_UNSPEC) addr->sap_addr.vci = vcc->vci; 46 addr->sap_addr.vpi = vcc->vpi;
47 if (vcc->vci != ATM_VCI_UNSPEC)
48 addr->sap_addr.vci = vcc->vci;
46 } 49 }
47 error = vcc_connect(sock, addr->sap_addr.itf, addr->sap_addr.vpi, 50 error = vcc_connect(sock, addr->sap_addr.itf, addr->sap_addr.vpi,
48 addr->sap_addr.vci); 51 addr->sap_addr.vci);
@@ -51,11 +54,10 @@ out:
51 return error; 54 return error;
52} 55}
53 56
54 57static int pvc_connect(struct socket *sock, struct sockaddr *sockaddr,
55static int pvc_connect(struct socket *sock,struct sockaddr *sockaddr, 58 int sockaddr_len, int flags)
56 int sockaddr_len,int flags)
57{ 59{
58 return pvc_bind(sock,sockaddr,sockaddr_len); 60 return pvc_bind(sock, sockaddr, sockaddr_len);
59} 61}
60 62
61static int pvc_setsockopt(struct socket *sock, int level, int optname, 63static int pvc_setsockopt(struct socket *sock, int level, int optname,
@@ -70,7 +72,6 @@ static int pvc_setsockopt(struct socket *sock, int level, int optname,
70 return error; 72 return error;
71} 73}
72 74
73
74static int pvc_getsockopt(struct socket *sock, int level, int optname, 75static int pvc_getsockopt(struct socket *sock, int level, int optname,
75 char __user *optval, int __user *optlen) 76 char __user *optval, int __user *optlen)
76{ 77{
@@ -83,16 +84,16 @@ static int pvc_getsockopt(struct socket *sock, int level, int optname,
83 return error; 84 return error;
84} 85}
85 86
86 87static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr,
87static int pvc_getname(struct socket *sock,struct sockaddr *sockaddr, 88 int *sockaddr_len, int peer)
88 int *sockaddr_len,int peer)
89{ 89{
90 struct sockaddr_atmpvc *addr; 90 struct sockaddr_atmpvc *addr;
91 struct atm_vcc *vcc = ATM_SD(sock); 91 struct atm_vcc *vcc = ATM_SD(sock);
92 92
93 if (!vcc->dev || !test_bit(ATM_VF_ADDR,&vcc->flags)) return -ENOTCONN; 93 if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
94 return -ENOTCONN;
94 *sockaddr_len = sizeof(struct sockaddr_atmpvc); 95 *sockaddr_len = sizeof(struct sockaddr_atmpvc);
95 addr = (struct sockaddr_atmpvc *) sockaddr; 96 addr = (struct sockaddr_atmpvc *)sockaddr;
96 addr->sap_family = AF_ATMPVC; 97 addr->sap_family = AF_ATMPVC;
97 addr->sap_addr.itf = vcc->dev->number; 98 addr->sap_addr.itf = vcc->dev->number;
98 addr->sap_addr.vpi = vcc->vpi; 99 addr->sap_addr.vpi = vcc->vpi;
@@ -100,7 +101,6 @@ static int pvc_getname(struct socket *sock,struct sockaddr *sockaddr,
100 return 0; 101 return 0;
101} 102}
102 103
103
104static const struct proto_ops pvc_proto_ops = { 104static const struct proto_ops pvc_proto_ops = {
105 .family = PF_ATMPVC, 105 .family = PF_ATMPVC,
106 .owner = THIS_MODULE, 106 .owner = THIS_MODULE,
@@ -137,7 +137,6 @@ static int pvc_create(struct net *net, struct socket *sock, int protocol,
137 return vcc_create(net, sock, protocol, PF_ATMPVC); 137 return vcc_create(net, sock, protocol, PF_ATMPVC);
138} 138}
139 139
140
141static const struct net_proto_family pvc_family_ops = { 140static const struct net_proto_family pvc_family_ops = {
142 .family = PF_ATMPVC, 141 .family = PF_ATMPVC,
143 .create = pvc_create, 142 .create = pvc_create,
diff --git a/net/atm/raw.c b/net/atm/raw.c
index cbfcc71a17b1..d0c4bd047dc4 100644
--- a/net/atm/raw.c
+++ b/net/atm/raw.c
@@ -2,6 +2,7 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
5 6
6#include <linux/module.h> 7#include <linux/module.h>
7#include <linux/atmdev.h> 8#include <linux/atmdev.h>
@@ -17,7 +18,7 @@
17 * SKB == NULL indicates that the link is being closed 18 * SKB == NULL indicates that the link is being closed
18 */ 19 */
19 20
20static void atm_push_raw(struct atm_vcc *vcc,struct sk_buff *skb) 21static void atm_push_raw(struct atm_vcc *vcc, struct sk_buff *skb)
21{ 22{
22 if (skb) { 23 if (skb) {
23 struct sock *sk = sk_atm(vcc); 24 struct sock *sk = sk_atm(vcc);
@@ -27,36 +28,33 @@ static void atm_push_raw(struct atm_vcc *vcc,struct sk_buff *skb)
27 } 28 }
28} 29}
29 30
30 31static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb)
31static void atm_pop_raw(struct atm_vcc *vcc,struct sk_buff *skb)
32{ 32{
33 struct sock *sk = sk_atm(vcc); 33 struct sock *sk = sk_atm(vcc);
34 34
35 pr_debug("APopR (%d) %d -= %d\n", vcc->vci, 35 pr_debug("(%d) %d -= %d\n",
36 sk_wmem_alloc_get(sk), skb->truesize); 36 vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
37 atomic_sub(skb->truesize, &sk->sk_wmem_alloc); 37 atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
38 dev_kfree_skb_any(skb); 38 dev_kfree_skb_any(skb);
39 sk->sk_write_space(sk); 39 sk->sk_write_space(sk);
40} 40}
41 41
42 42static int atm_send_aal0(struct atm_vcc *vcc, struct sk_buff *skb)
43static int atm_send_aal0(struct atm_vcc *vcc,struct sk_buff *skb)
44{ 43{
45 /* 44 /*
46 * Note that if vpi/vci are _ANY or _UNSPEC the below will 45 * Note that if vpi/vci are _ANY or _UNSPEC the below will
47 * still work 46 * still work
48 */ 47 */
49 if (!capable(CAP_NET_ADMIN) && 48 if (!capable(CAP_NET_ADMIN) &&
50 (((u32 *) skb->data)[0] & (ATM_HDR_VPI_MASK | ATM_HDR_VCI_MASK)) != 49 (((u32 *)skb->data)[0] & (ATM_HDR_VPI_MASK | ATM_HDR_VCI_MASK)) !=
51 ((vcc->vpi << ATM_HDR_VPI_SHIFT) | (vcc->vci << ATM_HDR_VCI_SHIFT))) 50 ((vcc->vpi << ATM_HDR_VPI_SHIFT) |
52 { 51 (vcc->vci << ATM_HDR_VCI_SHIFT))) {
53 kfree_skb(skb); 52 kfree_skb(skb);
54 return -EADDRNOTAVAIL; 53 return -EADDRNOTAVAIL;
55 } 54 }
56 return vcc->dev->ops->send(vcc,skb); 55 return vcc->dev->ops->send(vcc, skb);
57} 56}
58 57
59
60int atm_init_aal0(struct atm_vcc *vcc) 58int atm_init_aal0(struct atm_vcc *vcc)
61{ 59{
62 vcc->push = atm_push_raw; 60 vcc->push = atm_push_raw;
@@ -66,7 +64,6 @@ int atm_init_aal0(struct atm_vcc *vcc)
66 return 0; 64 return 0;
67} 65}
68 66
69
70int atm_init_aal34(struct atm_vcc *vcc) 67int atm_init_aal34(struct atm_vcc *vcc)
71{ 68{
72 vcc->push = atm_push_raw; 69 vcc->push = atm_push_raw;
@@ -76,7 +73,6 @@ int atm_init_aal34(struct atm_vcc *vcc)
76 return 0; 73 return 0;
77} 74}
78 75
79
80int atm_init_aal5(struct atm_vcc *vcc) 76int atm_init_aal5(struct atm_vcc *vcc)
81{ 77{
82 vcc->push = atm_push_raw; 78 vcc->push = atm_push_raw;
@@ -85,6 +81,4 @@ int atm_init_aal5(struct atm_vcc *vcc)
85 vcc->send = vcc->dev->ops->send; 81 vcc->send = vcc->dev->ops->send;
86 return 0; 82 return 0;
87} 83}
88
89
90EXPORT_SYMBOL(atm_init_aal5); 84EXPORT_SYMBOL(atm_init_aal5);
diff --git a/net/atm/resources.c b/net/atm/resources.c
index 56b7322ff461..90082904f20d 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -7,6 +7,7 @@
7 * 2002/01 - don't free the whole struct sock on sk->destruct time, 7 * 2002/01 - don't free the whole struct sock on sk->destruct time,
8 * use the default destruct function initialized by sock_init_data */ 8 * use the default destruct function initialized by sock_init_data */
9 9
10#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
10 11
11#include <linux/ctype.h> 12#include <linux/ctype.h>
12#include <linux/string.h> 13#include <linux/string.h>
@@ -70,7 +71,7 @@ struct atm_dev *atm_dev_lookup(int number)
70 mutex_unlock(&atm_dev_mutex); 71 mutex_unlock(&atm_dev_mutex);
71 return dev; 72 return dev;
72} 73}
73 74EXPORT_SYMBOL(atm_dev_lookup);
74 75
75struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops, 76struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops,
76 int number, unsigned long *flags) 77 int number, unsigned long *flags)
@@ -79,13 +80,13 @@ struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops,
79 80
80 dev = __alloc_atm_dev(type); 81 dev = __alloc_atm_dev(type);
81 if (!dev) { 82 if (!dev) {
82 printk(KERN_ERR "atm_dev_register: no space for dev %s\n", 83 pr_err("no space for dev %s\n", type);
83 type);
84 return NULL; 84 return NULL;
85 } 85 }
86 mutex_lock(&atm_dev_mutex); 86 mutex_lock(&atm_dev_mutex);
87 if (number != -1) { 87 if (number != -1) {
88 if ((inuse = __atm_dev_lookup(number))) { 88 inuse = __atm_dev_lookup(number);
89 if (inuse) {
89 atm_dev_put(inuse); 90 atm_dev_put(inuse);
90 mutex_unlock(&atm_dev_mutex); 91 mutex_unlock(&atm_dev_mutex);
91 kfree(dev); 92 kfree(dev);
@@ -109,16 +110,12 @@ struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops,
109 atomic_set(&dev->refcnt, 1); 110 atomic_set(&dev->refcnt, 1);
110 111
111 if (atm_proc_dev_register(dev) < 0) { 112 if (atm_proc_dev_register(dev) < 0) {
112 printk(KERN_ERR "atm_dev_register: " 113 pr_err("atm_proc_dev_register failed for dev %s\n", type);
113 "atm_proc_dev_register failed for dev %s\n",
114 type);
115 goto out_fail; 114 goto out_fail;
116 } 115 }
117 116
118 if (atm_register_sysfs(dev) < 0) { 117 if (atm_register_sysfs(dev) < 0) {
119 printk(KERN_ERR "atm_dev_register: " 118 pr_err("atm_register_sysfs failed for dev %s\n", type);
120 "atm_register_sysfs failed for dev %s\n",
121 type);
122 atm_proc_dev_deregister(dev); 119 atm_proc_dev_deregister(dev);
123 goto out_fail; 120 goto out_fail;
124 } 121 }
@@ -134,7 +131,7 @@ out_fail:
134 dev = NULL; 131 dev = NULL;
135 goto out; 132 goto out;
136} 133}
137 134EXPORT_SYMBOL(atm_dev_register);
138 135
139void atm_dev_deregister(struct atm_dev *dev) 136void atm_dev_deregister(struct atm_dev *dev)
140{ 137{
@@ -156,7 +153,7 @@ void atm_dev_deregister(struct atm_dev *dev)
156 153
157 atm_dev_put(dev); 154 atm_dev_put(dev);
158} 155}
159 156EXPORT_SYMBOL(atm_dev_deregister);
160 157
161static void copy_aal_stats(struct k_atm_aal_stats *from, 158static void copy_aal_stats(struct k_atm_aal_stats *from,
162 struct atm_aal_stats *to) 159 struct atm_aal_stats *to)
@@ -166,7 +163,6 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
166#undef __HANDLE_ITEM 163#undef __HANDLE_ITEM
167} 164}
168 165
169
170static void subtract_aal_stats(struct k_atm_aal_stats *from, 166static void subtract_aal_stats(struct k_atm_aal_stats *from,
171 struct atm_aal_stats *to) 167 struct atm_aal_stats *to)
172{ 168{
@@ -175,8 +171,8 @@ static void subtract_aal_stats(struct k_atm_aal_stats *from,
175#undef __HANDLE_ITEM 171#undef __HANDLE_ITEM
176} 172}
177 173
178 174static int fetch_stats(struct atm_dev *dev, struct atm_dev_stats __user *arg,
179static int fetch_stats(struct atm_dev *dev, struct atm_dev_stats __user *arg, int zero) 175 int zero)
180{ 176{
181 struct atm_dev_stats tmp; 177 struct atm_dev_stats tmp;
182 int error = 0; 178 int error = 0;
@@ -194,7 +190,6 @@ static int fetch_stats(struct atm_dev *dev, struct atm_dev_stats __user *arg, in
194 return error ? -EFAULT : 0; 190 return error ? -EFAULT : 0;
195} 191}
196 192
197
198int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat) 193int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
199{ 194{
200 void __user *buf; 195 void __user *buf;
@@ -210,50 +205,49 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
210#endif 205#endif
211 206
212 switch (cmd) { 207 switch (cmd) {
213 case ATM_GETNAMES: 208 case ATM_GETNAMES:
214 209 if (compat) {
215 if (compat) {
216#ifdef CONFIG_COMPAT 210#ifdef CONFIG_COMPAT
217 struct compat_atm_iobuf __user *ciobuf = arg; 211 struct compat_atm_iobuf __user *ciobuf = arg;
218 compat_uptr_t cbuf; 212 compat_uptr_t cbuf;
219 iobuf_len = &ciobuf->length; 213 iobuf_len = &ciobuf->length;
220 if (get_user(cbuf, &ciobuf->buffer)) 214 if (get_user(cbuf, &ciobuf->buffer))
221 return -EFAULT; 215 return -EFAULT;
222 buf = compat_ptr(cbuf); 216 buf = compat_ptr(cbuf);
223#endif 217#endif
224 } else { 218 } else {
225 struct atm_iobuf __user *iobuf = arg; 219 struct atm_iobuf __user *iobuf = arg;
226 iobuf_len = &iobuf->length; 220 iobuf_len = &iobuf->length;
227 if (get_user(buf, &iobuf->buffer)) 221 if (get_user(buf, &iobuf->buffer))
228 return -EFAULT;
229 }
230 if (get_user(len, iobuf_len))
231 return -EFAULT; 222 return -EFAULT;
232 mutex_lock(&atm_dev_mutex); 223 }
233 list_for_each(p, &atm_devs) 224 if (get_user(len, iobuf_len))
234 size += sizeof(int); 225 return -EFAULT;
235 if (size > len) { 226 mutex_lock(&atm_dev_mutex);
236 mutex_unlock(&atm_dev_mutex); 227 list_for_each(p, &atm_devs)
237 return -E2BIG; 228 size += sizeof(int);
238 } 229 if (size > len) {
239 tmp_buf = kmalloc(size, GFP_ATOMIC); 230 mutex_unlock(&atm_dev_mutex);
240 if (!tmp_buf) { 231 return -E2BIG;
241 mutex_unlock(&atm_dev_mutex); 232 }
242 return -ENOMEM; 233 tmp_buf = kmalloc(size, GFP_ATOMIC);
243 } 234 if (!tmp_buf) {
244 tmp_p = tmp_buf;
245 list_for_each(p, &atm_devs) {
246 dev = list_entry(p, struct atm_dev, dev_list);
247 *tmp_p++ = dev->number;
248 }
249 mutex_unlock(&atm_dev_mutex); 235 mutex_unlock(&atm_dev_mutex);
250 error = ((copy_to_user(buf, tmp_buf, size)) || 236 return -ENOMEM;
251 put_user(size, iobuf_len)) 237 }
252 ? -EFAULT : 0; 238 tmp_p = tmp_buf;
253 kfree(tmp_buf); 239 list_for_each(p, &atm_devs) {
254 return error; 240 dev = list_entry(p, struct atm_dev, dev_list);
255 default: 241 *tmp_p++ = dev->number;
256 break; 242 }
243 mutex_unlock(&atm_dev_mutex);
244 error = ((copy_to_user(buf, tmp_buf, size)) ||
245 put_user(size, iobuf_len))
246 ? -EFAULT : 0;
247 kfree(tmp_buf);
248 return error;
249 default:
250 break;
257 } 251 }
258 252
259 if (compat) { 253 if (compat) {
@@ -282,166 +276,167 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
282 if (get_user(number, &sioc->number)) 276 if (get_user(number, &sioc->number))
283 return -EFAULT; 277 return -EFAULT;
284 } 278 }
285 if (!(dev = try_then_request_module(atm_dev_lookup(number), 279
286 "atm-device-%d", number))) 280 dev = try_then_request_module(atm_dev_lookup(number), "atm-device-%d",
281 number);
282 if (!dev)
287 return -ENODEV; 283 return -ENODEV;
288 284
289 switch (cmd) { 285 switch (cmd) {
290 case ATM_GETTYPE: 286 case ATM_GETTYPE:
291 size = strlen(dev->type) + 1; 287 size = strlen(dev->type) + 1;
292 if (copy_to_user(buf, dev->type, size)) { 288 if (copy_to_user(buf, dev->type, size)) {
293 error = -EFAULT; 289 error = -EFAULT;
294 goto done; 290 goto done;
295 } 291 }
296 break; 292 break;
297 case ATM_GETESI: 293 case ATM_GETESI:
298 size = ESI_LEN; 294 size = ESI_LEN;
299 if (copy_to_user(buf, dev->esi, size)) { 295 if (copy_to_user(buf, dev->esi, size)) {
300 error = -EFAULT; 296 error = -EFAULT;
301 goto done; 297 goto done;
302 } 298 }
303 break; 299 break;
304 case ATM_SETESI: 300 case ATM_SETESI:
305 { 301 {
306 int i; 302 int i;
307 303
308 for (i = 0; i < ESI_LEN; i++) 304 for (i = 0; i < ESI_LEN; i++)
309 if (dev->esi[i]) { 305 if (dev->esi[i]) {
310 error = -EEXIST; 306 error = -EEXIST;
311 goto done;
312 }
313 }
314 /* fall through */
315 case ATM_SETESIF:
316 {
317 unsigned char esi[ESI_LEN];
318
319 if (!capable(CAP_NET_ADMIN)) {
320 error = -EPERM;
321 goto done;
322 }
323 if (copy_from_user(esi, buf, ESI_LEN)) {
324 error = -EFAULT;
325 goto done;
326 }
327 memcpy(dev->esi, esi, ESI_LEN);
328 error = ESI_LEN;
329 goto done;
330 }
331 case ATM_GETSTATZ:
332 if (!capable(CAP_NET_ADMIN)) {
333 error = -EPERM;
334 goto done;
335 }
336 /* fall through */
337 case ATM_GETSTAT:
338 size = sizeof(struct atm_dev_stats);
339 error = fetch_stats(dev, buf, cmd == ATM_GETSTATZ);
340 if (error)
341 goto done;
342 break;
343 case ATM_GETCIRANGE:
344 size = sizeof(struct atm_cirange);
345 if (copy_to_user(buf, &dev->ci_range, size)) {
346 error = -EFAULT;
347 goto done;
348 }
349 break;
350 case ATM_GETLINKRATE:
351 size = sizeof(int);
352 if (copy_to_user(buf, &dev->link_rate, size)) {
353 error = -EFAULT;
354 goto done;
355 }
356 break;
357 case ATM_RSTADDR:
358 if (!capable(CAP_NET_ADMIN)) {
359 error = -EPERM;
360 goto done;
361 }
362 atm_reset_addr(dev, ATM_ADDR_LOCAL);
363 break;
364 case ATM_ADDADDR:
365 case ATM_DELADDR:
366 case ATM_ADDLECSADDR:
367 case ATM_DELLECSADDR:
368 if (!capable(CAP_NET_ADMIN)) {
369 error = -EPERM;
370 goto done;
371 }
372 {
373 struct sockaddr_atmsvc addr;
374
375 if (copy_from_user(&addr, buf, sizeof(addr))) {
376 error = -EFAULT;
377 goto done;
378 }
379 if (cmd == ATM_ADDADDR || cmd == ATM_ADDLECSADDR)
380 error = atm_add_addr(dev, &addr,
381 (cmd == ATM_ADDADDR ?
382 ATM_ADDR_LOCAL : ATM_ADDR_LECS));
383 else
384 error = atm_del_addr(dev, &addr,
385 (cmd == ATM_DELADDR ?
386 ATM_ADDR_LOCAL : ATM_ADDR_LECS));
387 goto done; 307 goto done;
388 } 308 }
389 case ATM_GETADDR: 309 }
390 case ATM_GETLECSADDR: 310 /* fall through */
391 error = atm_get_addr(dev, buf, len, 311 case ATM_SETESIF:
392 (cmd == ATM_GETADDR ? 312 {
313 unsigned char esi[ESI_LEN];
314
315 if (!capable(CAP_NET_ADMIN)) {
316 error = -EPERM;
317 goto done;
318 }
319 if (copy_from_user(esi, buf, ESI_LEN)) {
320 error = -EFAULT;
321 goto done;
322 }
323 memcpy(dev->esi, esi, ESI_LEN);
324 error = ESI_LEN;
325 goto done;
326 }
327 case ATM_GETSTATZ:
328 if (!capable(CAP_NET_ADMIN)) {
329 error = -EPERM;
330 goto done;
331 }
332 /* fall through */
333 case ATM_GETSTAT:
334 size = sizeof(struct atm_dev_stats);
335 error = fetch_stats(dev, buf, cmd == ATM_GETSTATZ);
336 if (error)
337 goto done;
338 break;
339 case ATM_GETCIRANGE:
340 size = sizeof(struct atm_cirange);
341 if (copy_to_user(buf, &dev->ci_range, size)) {
342 error = -EFAULT;
343 goto done;
344 }
345 break;
346 case ATM_GETLINKRATE:
347 size = sizeof(int);
348 if (copy_to_user(buf, &dev->link_rate, size)) {
349 error = -EFAULT;
350 goto done;
351 }
352 break;
353 case ATM_RSTADDR:
354 if (!capable(CAP_NET_ADMIN)) {
355 error = -EPERM;
356 goto done;
357 }
358 atm_reset_addr(dev, ATM_ADDR_LOCAL);
359 break;
360 case ATM_ADDADDR:
361 case ATM_DELADDR:
362 case ATM_ADDLECSADDR:
363 case ATM_DELLECSADDR:
364 {
365 struct sockaddr_atmsvc addr;
366
367 if (!capable(CAP_NET_ADMIN)) {
368 error = -EPERM;
369 goto done;
370 }
371
372 if (copy_from_user(&addr, buf, sizeof(addr))) {
373 error = -EFAULT;
374 goto done;
375 }
376 if (cmd == ATM_ADDADDR || cmd == ATM_ADDLECSADDR)
377 error = atm_add_addr(dev, &addr,
378 (cmd == ATM_ADDADDR ?
393 ATM_ADDR_LOCAL : ATM_ADDR_LECS)); 379 ATM_ADDR_LOCAL : ATM_ADDR_LECS));
394 if (error < 0) 380 else
395 goto done; 381 error = atm_del_addr(dev, &addr,
396 size = error; 382 (cmd == ATM_DELADDR ?
397 /* may return 0, but later on size == 0 means "don't 383 ATM_ADDR_LOCAL : ATM_ADDR_LECS));
398 write the length" */ 384 goto done;
399 error = put_user(size, sioc_len) 385 }
400 ? -EFAULT : 0; 386 case ATM_GETADDR:
387 case ATM_GETLECSADDR:
388 error = atm_get_addr(dev, buf, len,
389 (cmd == ATM_GETADDR ?
390 ATM_ADDR_LOCAL : ATM_ADDR_LECS));
391 if (error < 0)
392 goto done;
393 size = error;
394 /* may return 0, but later on size == 0 means "don't
395 write the length" */
396 error = put_user(size, sioc_len) ? -EFAULT : 0;
397 goto done;
398 case ATM_SETLOOP:
399 if (__ATM_LM_XTRMT((int) (unsigned long) buf) &&
400 __ATM_LM_XTLOC((int) (unsigned long) buf) >
401 __ATM_LM_XTRMT((int) (unsigned long) buf)) {
402 error = -EINVAL;
403 goto done;
404 }
405 /* fall through */
406 case ATM_SETCIRANGE:
407 case SONET_GETSTATZ:
408 case SONET_SETDIAG:
409 case SONET_CLRDIAG:
410 case SONET_SETFRAMING:
411 if (!capable(CAP_NET_ADMIN)) {
412 error = -EPERM;
401 goto done; 413 goto done;
402 case ATM_SETLOOP: 414 }
403 if (__ATM_LM_XTRMT((int) (unsigned long) buf) && 415 /* fall through */
404 __ATM_LM_XTLOC((int) (unsigned long) buf) > 416 default:
405 __ATM_LM_XTRMT((int) (unsigned long) buf)) { 417 if (compat) {
418#ifdef CONFIG_COMPAT
419 if (!dev->ops->compat_ioctl) {
406 error = -EINVAL; 420 error = -EINVAL;
407 goto done; 421 goto done;
408 } 422 }
409 /* fall through */ 423 size = dev->ops->compat_ioctl(dev, cmd, buf);
410 case ATM_SETCIRANGE:
411 case SONET_GETSTATZ:
412 case SONET_SETDIAG:
413 case SONET_CLRDIAG:
414 case SONET_SETFRAMING:
415 if (!capable(CAP_NET_ADMIN)) {
416 error = -EPERM;
417 goto done;
418 }
419 /* fall through */
420 default:
421 if (compat) {
422#ifdef CONFIG_COMPAT
423 if (!dev->ops->compat_ioctl) {
424 error = -EINVAL;
425 goto done;
426 }
427 size = dev->ops->compat_ioctl(dev, cmd, buf);
428#endif 424#endif
429 } else { 425 } else {
430 if (!dev->ops->ioctl) { 426 if (!dev->ops->ioctl) {
431 error = -EINVAL; 427 error = -EINVAL;
432 goto done;
433 }
434 size = dev->ops->ioctl(dev, cmd, buf);
435 }
436 if (size < 0) {
437 error = (size == -ENOIOCTLCMD ? -EINVAL : size);
438 goto done; 428 goto done;
439 } 429 }
430 size = dev->ops->ioctl(dev, cmd, buf);
431 }
432 if (size < 0) {
433 error = (size == -ENOIOCTLCMD ? -EINVAL : size);
434 goto done;
435 }
440 } 436 }
441 437
442 if (size) 438 if (size)
443 error = put_user(size, sioc_len) 439 error = put_user(size, sioc_len) ? -EFAULT : 0;
444 ? -EFAULT : 0;
445 else 440 else
446 error = 0; 441 error = 0;
447done: 442done:
@@ -449,21 +444,10 @@ done:
449 return error; 444 return error;
450} 445}
451 446
452static __inline__ void *dev_get_idx(loff_t left)
453{
454 struct list_head *p;
455
456 list_for_each(p, &atm_devs) {
457 if (!--left)
458 break;
459 }
460 return (p != &atm_devs) ? p : NULL;
461}
462
463void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos) 447void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos)
464{ 448{
465 mutex_lock(&atm_dev_mutex); 449 mutex_lock(&atm_dev_mutex);
466 return *pos ? dev_get_idx(*pos) : SEQ_START_TOKEN; 450 return seq_list_start_head(&atm_devs, *pos);
467} 451}
468 452
469void atm_dev_seq_stop(struct seq_file *seq, void *v) 453void atm_dev_seq_stop(struct seq_file *seq, void *v)
@@ -473,13 +457,5 @@ void atm_dev_seq_stop(struct seq_file *seq, void *v)
473 457
474void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) 458void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
475{ 459{
476 ++*pos; 460 return seq_list_next(v, &atm_devs, pos);
477 v = (v == SEQ_START_TOKEN)
478 ? atm_devs.next : ((struct list_head *)v)->next;
479 return (v == &atm_devs) ? NULL : v;
480} 461}
481
482
483EXPORT_SYMBOL(atm_dev_register);
484EXPORT_SYMBOL(atm_dev_deregister);
485EXPORT_SYMBOL(atm_dev_lookup);
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 229921400522..ad1d28ae512b 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -2,6 +2,7 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
5 6
6#include <linux/errno.h> /* error codes */ 7#include <linux/errno.h> /* error codes */
7#include <linux/kernel.h> /* printk */ 8#include <linux/kernel.h> /* printk */
@@ -17,7 +18,6 @@
17#include "resources.h" 18#include "resources.h"
18#include "signaling.h" 19#include "signaling.h"
19 20
20
21#undef WAIT_FOR_DEMON /* #define this if system calls on SVC sockets 21#undef WAIT_FOR_DEMON /* #define this if system calls on SVC sockets
22 should block until the demon runs. 22 should block until the demon runs.
23 Danger: may cause nasty hangs if the demon 23 Danger: may cause nasty hangs if the demon
@@ -28,60 +28,59 @@ struct atm_vcc *sigd = NULL;
28static DECLARE_WAIT_QUEUE_HEAD(sigd_sleep); 28static DECLARE_WAIT_QUEUE_HEAD(sigd_sleep);
29#endif 29#endif
30 30
31
32static void sigd_put_skb(struct sk_buff *skb) 31static void sigd_put_skb(struct sk_buff *skb)
33{ 32{
34#ifdef WAIT_FOR_DEMON 33#ifdef WAIT_FOR_DEMON
35 DECLARE_WAITQUEUE(wait,current); 34 DECLARE_WAITQUEUE(wait, current);
36 35
37 add_wait_queue(&sigd_sleep,&wait); 36 add_wait_queue(&sigd_sleep, &wait);
38 while (!sigd) { 37 while (!sigd) {
39 set_current_state(TASK_UNINTERRUPTIBLE); 38 set_current_state(TASK_UNINTERRUPTIBLE);
40 pr_debug("atmsvc: waiting for signaling demon...\n"); 39 pr_debug("atmsvc: waiting for signaling daemon...\n");
41 schedule(); 40 schedule();
42 } 41 }
43 current->state = TASK_RUNNING; 42 current->state = TASK_RUNNING;
44 remove_wait_queue(&sigd_sleep,&wait); 43 remove_wait_queue(&sigd_sleep, &wait);
45#else 44#else
46 if (!sigd) { 45 if (!sigd) {
47 pr_debug("atmsvc: no signaling demon\n"); 46 pr_debug("atmsvc: no signaling daemon\n");
48 kfree_skb(skb); 47 kfree_skb(skb);
49 return; 48 return;
50 } 49 }
51#endif 50#endif
52 atm_force_charge(sigd,skb->truesize); 51 atm_force_charge(sigd, skb->truesize);
53 skb_queue_tail(&sk_atm(sigd)->sk_receive_queue,skb); 52 skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb);
54 sk_atm(sigd)->sk_data_ready(sk_atm(sigd), skb->len); 53 sk_atm(sigd)->sk_data_ready(sk_atm(sigd), skb->len);
55} 54}
56 55
57 56static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg)
58static void modify_qos(struct atm_vcc *vcc,struct atmsvc_msg *msg)
59{ 57{
60 struct sk_buff *skb; 58 struct sk_buff *skb;
61 59
62 if (test_bit(ATM_VF_RELEASED,&vcc->flags) || 60 if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
63 !test_bit(ATM_VF_READY,&vcc->flags)) 61 !test_bit(ATM_VF_READY, &vcc->flags))
64 return; 62 return;
65 msg->type = as_error; 63 msg->type = as_error;
66 if (!vcc->dev->ops->change_qos) msg->reply = -EOPNOTSUPP; 64 if (!vcc->dev->ops->change_qos)
65 msg->reply = -EOPNOTSUPP;
67 else { 66 else {
68 /* should lock VCC */ 67 /* should lock VCC */
69 msg->reply = vcc->dev->ops->change_qos(vcc,&msg->qos, 68 msg->reply = vcc->dev->ops->change_qos(vcc, &msg->qos,
70 msg->reply); 69 msg->reply);
71 if (!msg->reply) msg->type = as_okay; 70 if (!msg->reply)
71 msg->type = as_okay;
72 } 72 }
73 /* 73 /*
74 * Should probably just turn around the old skb. But the, the buffer 74 * Should probably just turn around the old skb. But the, the buffer
75 * space accounting needs to follow the change too. Maybe later. 75 * space accounting needs to follow the change too. Maybe later.
76 */ 76 */
77 while (!(skb = alloc_skb(sizeof(struct atmsvc_msg),GFP_KERNEL))) 77 while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
78 schedule(); 78 schedule();
79 *(struct atmsvc_msg *) skb_put(skb,sizeof(struct atmsvc_msg)) = *msg; 79 *(struct atmsvc_msg *)skb_put(skb, sizeof(struct atmsvc_msg)) = *msg;
80 sigd_put_skb(skb); 80 sigd_put_skb(skb);
81} 81}
82 82
83 83static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
84static int sigd_send(struct atm_vcc *vcc,struct sk_buff *skb)
85{ 84{
86 struct atmsvc_msg *msg; 85 struct atmsvc_msg *msg;
87 struct atm_vcc *session_vcc; 86 struct atm_vcc *session_vcc;
@@ -90,69 +89,68 @@ static int sigd_send(struct atm_vcc *vcc,struct sk_buff *skb)
90 msg = (struct atmsvc_msg *) skb->data; 89 msg = (struct atmsvc_msg *) skb->data;
91 atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); 90 atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
92 vcc = *(struct atm_vcc **) &msg->vcc; 91 vcc = *(struct atm_vcc **) &msg->vcc;
93 pr_debug("sigd_send %d (0x%lx)\n",(int) msg->type, 92 pr_debug("%d (0x%lx)\n", (int)msg->type, (unsigned long)vcc);
94 (unsigned long) vcc);
95 sk = sk_atm(vcc); 93 sk = sk_atm(vcc);
96 94
97 switch (msg->type) { 95 switch (msg->type) {
98 case as_okay: 96 case as_okay:
99 sk->sk_err = -msg->reply; 97 sk->sk_err = -msg->reply;
100 clear_bit(ATM_VF_WAITING, &vcc->flags); 98 clear_bit(ATM_VF_WAITING, &vcc->flags);
101 if (!*vcc->local.sas_addr.prv && 99 if (!*vcc->local.sas_addr.prv && !*vcc->local.sas_addr.pub) {
102 !*vcc->local.sas_addr.pub) { 100 vcc->local.sas_family = AF_ATMSVC;
103 vcc->local.sas_family = AF_ATMSVC; 101 memcpy(vcc->local.sas_addr.prv,
104 memcpy(vcc->local.sas_addr.prv, 102 msg->local.sas_addr.prv, ATM_ESA_LEN);
105 msg->local.sas_addr.prv,ATM_ESA_LEN); 103 memcpy(vcc->local.sas_addr.pub,
106 memcpy(vcc->local.sas_addr.pub, 104 msg->local.sas_addr.pub, ATM_E164_LEN + 1);
107 msg->local.sas_addr.pub,ATM_E164_LEN+1); 105 }
108 } 106 session_vcc = vcc->session ? vcc->session : vcc;
109 session_vcc = vcc->session ? vcc->session : vcc; 107 if (session_vcc->vpi || session_vcc->vci)
110 if (session_vcc->vpi || session_vcc->vci) break;
111 session_vcc->itf = msg->pvc.sap_addr.itf;
112 session_vcc->vpi = msg->pvc.sap_addr.vpi;
113 session_vcc->vci = msg->pvc.sap_addr.vci;
114 if (session_vcc->vpi || session_vcc->vci)
115 session_vcc->qos = msg->qos;
116 break;
117 case as_error:
118 clear_bit(ATM_VF_REGIS,&vcc->flags);
119 clear_bit(ATM_VF_READY,&vcc->flags);
120 sk->sk_err = -msg->reply;
121 clear_bit(ATM_VF_WAITING, &vcc->flags);
122 break; 108 break;
123 case as_indicate: 109 session_vcc->itf = msg->pvc.sap_addr.itf;
124 vcc = *(struct atm_vcc **) &msg->listen_vcc; 110 session_vcc->vpi = msg->pvc.sap_addr.vpi;
125 sk = sk_atm(vcc); 111 session_vcc->vci = msg->pvc.sap_addr.vci;
126 pr_debug("as_indicate!!!\n"); 112 if (session_vcc->vpi || session_vcc->vci)
127 lock_sock(sk); 113 session_vcc->qos = msg->qos;
128 if (sk_acceptq_is_full(sk)) { 114 break;
129 sigd_enq(NULL,as_reject,vcc,NULL,NULL); 115 case as_error:
130 dev_kfree_skb(skb); 116 clear_bit(ATM_VF_REGIS, &vcc->flags);
131 goto as_indicate_complete; 117 clear_bit(ATM_VF_READY, &vcc->flags);
132 } 118 sk->sk_err = -msg->reply;
133 sk->sk_ack_backlog++; 119 clear_bit(ATM_VF_WAITING, &vcc->flags);
134 skb_queue_tail(&sk->sk_receive_queue, skb); 120 break;
135 pr_debug("waking sk->sk_sleep 0x%p\n", sk->sk_sleep); 121 case as_indicate:
136 sk->sk_state_change(sk); 122 vcc = *(struct atm_vcc **)&msg->listen_vcc;
123 sk = sk_atm(vcc);
124 pr_debug("as_indicate!!!\n");
125 lock_sock(sk);
126 if (sk_acceptq_is_full(sk)) {
127 sigd_enq(NULL, as_reject, vcc, NULL, NULL);
128 dev_kfree_skb(skb);
129 goto as_indicate_complete;
130 }
131 sk->sk_ack_backlog++;
132 skb_queue_tail(&sk->sk_receive_queue, skb);
133 pr_debug("waking sk->sk_sleep 0x%p\n", sk->sk_sleep);
134 sk->sk_state_change(sk);
137as_indicate_complete: 135as_indicate_complete:
138 release_sock(sk); 136 release_sock(sk);
139 return 0; 137 return 0;
140 case as_close: 138 case as_close:
141 set_bit(ATM_VF_RELEASED,&vcc->flags); 139 set_bit(ATM_VF_RELEASED, &vcc->flags);
142 vcc_release_async(vcc, msg->reply); 140 vcc_release_async(vcc, msg->reply);
143 goto out; 141 goto out;
144 case as_modify: 142 case as_modify:
145 modify_qos(vcc,msg); 143 modify_qos(vcc, msg);
146 break; 144 break;
147 case as_addparty: 145 case as_addparty:
148 case as_dropparty: 146 case as_dropparty:
149 sk->sk_err_soft = msg->reply; /* < 0 failure, otherwise ep_ref */ 147 sk->sk_err_soft = msg->reply;
150 clear_bit(ATM_VF_WAITING, &vcc->flags); 148 /* < 0 failure, otherwise ep_ref */
151 break; 149 clear_bit(ATM_VF_WAITING, &vcc->flags);
152 default: 150 break;
153 printk(KERN_ALERT "sigd_send: bad message type %d\n", 151 default:
154 (int) msg->type); 152 pr_alert("bad message type %d\n", (int)msg->type);
155 return -EINVAL; 153 return -EINVAL;
156 } 154 }
157 sk->sk_state_change(sk); 155 sk->sk_state_change(sk);
158out: 156out:
@@ -160,48 +158,52 @@ out:
160 return 0; 158 return 0;
161} 159}
162 160
163 161void sigd_enq2(struct atm_vcc *vcc, enum atmsvc_msg_type type,
164void sigd_enq2(struct atm_vcc *vcc,enum atmsvc_msg_type type, 162 struct atm_vcc *listen_vcc, const struct sockaddr_atmpvc *pvc,
165 struct atm_vcc *listen_vcc,const struct sockaddr_atmpvc *pvc, 163 const struct sockaddr_atmsvc *svc, const struct atm_qos *qos,
166 const struct sockaddr_atmsvc *svc,const struct atm_qos *qos,int reply) 164 int reply)
167{ 165{
168 struct sk_buff *skb; 166 struct sk_buff *skb;
169 struct atmsvc_msg *msg; 167 struct atmsvc_msg *msg;
170 static unsigned session = 0; 168 static unsigned session = 0;
171 169
172 pr_debug("sigd_enq %d (0x%p)\n",(int) type,vcc); 170 pr_debug("%d (0x%p)\n", (int)type, vcc);
173 while (!(skb = alloc_skb(sizeof(struct atmsvc_msg),GFP_KERNEL))) 171 while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
174 schedule(); 172 schedule();
175 msg = (struct atmsvc_msg *) skb_put(skb,sizeof(struct atmsvc_msg)); 173 msg = (struct atmsvc_msg *)skb_put(skb, sizeof(struct atmsvc_msg));
176 memset(msg,0,sizeof(*msg)); 174 memset(msg, 0, sizeof(*msg));
177 msg->type = type; 175 msg->type = type;
178 *(struct atm_vcc **) &msg->vcc = vcc; 176 *(struct atm_vcc **) &msg->vcc = vcc;
179 *(struct atm_vcc **) &msg->listen_vcc = listen_vcc; 177 *(struct atm_vcc **) &msg->listen_vcc = listen_vcc;
180 msg->reply = reply; 178 msg->reply = reply;
181 if (qos) msg->qos = *qos; 179 if (qos)
182 if (vcc) msg->sap = vcc->sap; 180 msg->qos = *qos;
183 if (svc) msg->svc = *svc; 181 if (vcc)
184 if (vcc) msg->local = vcc->local; 182 msg->sap = vcc->sap;
185 if (pvc) msg->pvc = *pvc; 183 if (svc)
184 msg->svc = *svc;
185 if (vcc)
186 msg->local = vcc->local;
187 if (pvc)
188 msg->pvc = *pvc;
186 if (vcc) { 189 if (vcc) {
187 if (type == as_connect && test_bit(ATM_VF_SESSION, &vcc->flags)) 190 if (type == as_connect && test_bit(ATM_VF_SESSION, &vcc->flags))
188 msg->session = ++session; 191 msg->session = ++session;
189 /* every new pmp connect gets the next session number */ 192 /* every new pmp connect gets the next session number */
190 } 193 }
191 sigd_put_skb(skb); 194 sigd_put_skb(skb);
192 if (vcc) set_bit(ATM_VF_REGIS,&vcc->flags); 195 if (vcc)
196 set_bit(ATM_VF_REGIS, &vcc->flags);
193} 197}
194 198
195 199void sigd_enq(struct atm_vcc *vcc, enum atmsvc_msg_type type,
196void sigd_enq(struct atm_vcc *vcc,enum atmsvc_msg_type type, 200 struct atm_vcc *listen_vcc, const struct sockaddr_atmpvc *pvc,
197 struct atm_vcc *listen_vcc,const struct sockaddr_atmpvc *pvc, 201 const struct sockaddr_atmsvc *svc)
198 const struct sockaddr_atmsvc *svc)
199{ 202{
200 sigd_enq2(vcc,type,listen_vcc,pvc,svc,vcc ? &vcc->qos : NULL,0); 203 sigd_enq2(vcc, type, listen_vcc, pvc, svc, vcc ? &vcc->qos : NULL, 0);
201 /* other ISP applications may use "reply" */ 204 /* other ISP applications may use "reply" */
202} 205}
203 206
204
205static void purge_vcc(struct atm_vcc *vcc) 207static void purge_vcc(struct atm_vcc *vcc)
206{ 208{
207 if (sk_atm(vcc)->sk_family == PF_ATMSVC && 209 if (sk_atm(vcc)->sk_family == PF_ATMSVC &&
@@ -212,21 +214,20 @@ static void purge_vcc(struct atm_vcc *vcc)
212 } 214 }
213} 215}
214 216
215
216static void sigd_close(struct atm_vcc *vcc) 217static void sigd_close(struct atm_vcc *vcc)
217{ 218{
218 struct hlist_node *node; 219 struct hlist_node *node;
219 struct sock *s; 220 struct sock *s;
220 int i; 221 int i;
221 222
222 pr_debug("sigd_close\n"); 223 pr_debug("\n");
223 sigd = NULL; 224 sigd = NULL;
224 if (skb_peek(&sk_atm(vcc)->sk_receive_queue)) 225 if (skb_peek(&sk_atm(vcc)->sk_receive_queue))
225 printk(KERN_ERR "sigd_close: closing with requests pending\n"); 226 pr_err("closing with requests pending\n");
226 skb_queue_purge(&sk_atm(vcc)->sk_receive_queue); 227 skb_queue_purge(&sk_atm(vcc)->sk_receive_queue);
227 228
228 read_lock(&vcc_sklist_lock); 229 read_lock(&vcc_sklist_lock);
229 for(i = 0; i < VCC_HTABLE_SIZE; ++i) { 230 for (i = 0; i < VCC_HTABLE_SIZE; ++i) {
230 struct hlist_head *head = &vcc_hash[i]; 231 struct hlist_head *head = &vcc_hash[i];
231 232
232 sk_for_each(s, node, head) { 233 sk_for_each(s, node, head) {
@@ -238,13 +239,11 @@ static void sigd_close(struct atm_vcc *vcc)
238 read_unlock(&vcc_sklist_lock); 239 read_unlock(&vcc_sklist_lock);
239} 240}
240 241
241
242static struct atmdev_ops sigd_dev_ops = { 242static struct atmdev_ops sigd_dev_ops = {
243 .close = sigd_close, 243 .close = sigd_close,
244 .send = sigd_send 244 .send = sigd_send
245}; 245};
246 246
247
248static struct atm_dev sigd_dev = { 247static struct atm_dev sigd_dev = {
249 .ops = &sigd_dev_ops, 248 .ops = &sigd_dev_ops,
250 .type = "sig", 249 .type = "sig",
@@ -252,16 +251,16 @@ static struct atm_dev sigd_dev = {
252 .lock = __SPIN_LOCK_UNLOCKED(sigd_dev.lock) 251 .lock = __SPIN_LOCK_UNLOCKED(sigd_dev.lock)
253}; 252};
254 253
255
256int sigd_attach(struct atm_vcc *vcc) 254int sigd_attach(struct atm_vcc *vcc)
257{ 255{
258 if (sigd) return -EADDRINUSE; 256 if (sigd)
259 pr_debug("sigd_attach\n"); 257 return -EADDRINUSE;
258 pr_debug("\n");
260 sigd = vcc; 259 sigd = vcc;
261 vcc->dev = &sigd_dev; 260 vcc->dev = &sigd_dev;
262 vcc_insert_socket(sk_atm(vcc)); 261 vcc_insert_socket(sk_atm(vcc));
263 set_bit(ATM_VF_META,&vcc->flags); 262 set_bit(ATM_VF_META, &vcc->flags);
264 set_bit(ATM_VF_READY,&vcc->flags); 263 set_bit(ATM_VF_READY, &vcc->flags);
265#ifdef WAIT_FOR_DEMON 264#ifdef WAIT_FOR_DEMON
266 wake_up(&sigd_sleep); 265 wake_up(&sigd_sleep);
267#endif 266#endif
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 66e1d9b3e5de..3ba9a45a51ac 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -2,6 +2,7 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
5 6
6#include <linux/string.h> 7#include <linux/string.h>
7#include <linux/net.h> /* struct socket, struct proto_ops */ 8#include <linux/net.h> /* struct socket, struct proto_ops */
@@ -18,14 +19,15 @@
18#include <linux/atmdev.h> 19#include <linux/atmdev.h>
19#include <linux/bitops.h> 20#include <linux/bitops.h>
20#include <net/sock.h> /* for sock_no_* */ 21#include <net/sock.h> /* for sock_no_* */
21#include <asm/uaccess.h> 22#include <linux/uaccess.h>
22 23
23#include "resources.h" 24#include "resources.h"
24#include "common.h" /* common for PVCs and SVCs */ 25#include "common.h" /* common for PVCs and SVCs */
25#include "signaling.h" 26#include "signaling.h"
26#include "addr.h" 27#include "addr.h"
27 28
28static int svc_create(struct net *net, struct socket *sock, int protocol, int kern); 29static int svc_create(struct net *net, struct socket *sock, int protocol,
30 int kern);
29 31
30/* 32/*
31 * Note: since all this is still nicely synchronized with the signaling demon, 33 * Note: since all this is still nicely synchronized with the signaling demon,
@@ -34,25 +36,25 @@ static int svc_create(struct net *net, struct socket *sock, int protocol, int ke
34 */ 36 */
35 37
36 38
37static int svc_shutdown(struct socket *sock,int how) 39static int svc_shutdown(struct socket *sock, int how)
38{ 40{
39 return 0; 41 return 0;
40} 42}
41 43
42
43static void svc_disconnect(struct atm_vcc *vcc) 44static void svc_disconnect(struct atm_vcc *vcc)
44{ 45{
45 DEFINE_WAIT(wait); 46 DEFINE_WAIT(wait);
46 struct sk_buff *skb; 47 struct sk_buff *skb;
47 struct sock *sk = sk_atm(vcc); 48 struct sock *sk = sk_atm(vcc);
48 49
49 pr_debug("svc_disconnect %p\n",vcc); 50 pr_debug("%p\n", vcc);
50 if (test_bit(ATM_VF_REGIS,&vcc->flags)) { 51 if (test_bit(ATM_VF_REGIS, &vcc->flags)) {
51 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 52 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
52 sigd_enq(vcc,as_close,NULL,NULL,NULL); 53 sigd_enq(vcc, as_close, NULL, NULL, NULL);
53 while (!test_bit(ATM_VF_RELEASED,&vcc->flags) && sigd) { 54 while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
54 schedule(); 55 schedule();
55 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 56 prepare_to_wait(sk->sk_sleep, &wait,
57 TASK_UNINTERRUPTIBLE);
56 } 58 }
57 finish_wait(sk->sk_sleep, &wait); 59 finish_wait(sk->sk_sleep, &wait);
58 } 60 }
@@ -61,35 +63,35 @@ static void svc_disconnect(struct atm_vcc *vcc)
61 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { 63 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
62 atm_return(vcc, skb->truesize); 64 atm_return(vcc, skb->truesize);
63 pr_debug("LISTEN REL\n"); 65 pr_debug("LISTEN REL\n");
64 sigd_enq2(NULL,as_reject,vcc,NULL,NULL,&vcc->qos,0); 66 sigd_enq2(NULL, as_reject, vcc, NULL, NULL, &vcc->qos, 0);
65 dev_kfree_skb(skb); 67 dev_kfree_skb(skb);
66 } 68 }
67 clear_bit(ATM_VF_REGIS, &vcc->flags); 69 clear_bit(ATM_VF_REGIS, &vcc->flags);
68 /* ... may retry later */ 70 /* ... may retry later */
69} 71}
70 72
71
72static int svc_release(struct socket *sock) 73static int svc_release(struct socket *sock)
73{ 74{
74 struct sock *sk = sock->sk; 75 struct sock *sk = sock->sk;
75 struct atm_vcc *vcc; 76 struct atm_vcc *vcc;
76 77
77 if (sk) { 78 if (sk) {
78 vcc = ATM_SD(sock); 79 vcc = ATM_SD(sock);
79 pr_debug("svc_release %p\n", vcc); 80 pr_debug("%p\n", vcc);
80 clear_bit(ATM_VF_READY, &vcc->flags); 81 clear_bit(ATM_VF_READY, &vcc->flags);
81 /* VCC pointer is used as a reference, so we must not free it 82 /*
82 (thereby subjecting it to re-use) before all pending connections 83 * VCC pointer is used as a reference,
83 are closed */ 84 * so we must not free it (thereby subjecting it to re-use)
85 * before all pending connections are closed
86 */
84 svc_disconnect(vcc); 87 svc_disconnect(vcc);
85 vcc_release(sock); 88 vcc_release(sock);
86 } 89 }
87 return 0; 90 return 0;
88} 91}
89 92
90 93static int svc_bind(struct socket *sock, struct sockaddr *sockaddr,
91static int svc_bind(struct socket *sock,struct sockaddr *sockaddr, 94 int sockaddr_len)
92 int sockaddr_len)
93{ 95{
94 DEFINE_WAIT(wait); 96 DEFINE_WAIT(wait);
95 struct sock *sk = sock->sk; 97 struct sock *sk = sock->sk;
@@ -114,38 +116,37 @@ static int svc_bind(struct socket *sock,struct sockaddr *sockaddr,
114 error = -EAFNOSUPPORT; 116 error = -EAFNOSUPPORT;
115 goto out; 117 goto out;
116 } 118 }
117 clear_bit(ATM_VF_BOUND,&vcc->flags); 119 clear_bit(ATM_VF_BOUND, &vcc->flags);
118 /* failing rebind will kill old binding */ 120 /* failing rebind will kill old binding */
119 /* @@@ check memory (de)allocation on rebind */ 121 /* @@@ check memory (de)allocation on rebind */
120 if (!test_bit(ATM_VF_HASQOS,&vcc->flags)) { 122 if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) {
121 error = -EBADFD; 123 error = -EBADFD;
122 goto out; 124 goto out;
123 } 125 }
124 vcc->local = *addr; 126 vcc->local = *addr;
125 set_bit(ATM_VF_WAITING, &vcc->flags); 127 set_bit(ATM_VF_WAITING, &vcc->flags);
126 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 128 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
127 sigd_enq(vcc,as_bind,NULL,NULL,&vcc->local); 129 sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local);
128 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 130 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
129 schedule(); 131 schedule();
130 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 132 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
131 } 133 }
132 finish_wait(sk->sk_sleep, &wait); 134 finish_wait(sk->sk_sleep, &wait);
133 clear_bit(ATM_VF_REGIS,&vcc->flags); /* doesn't count */ 135 clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */
134 if (!sigd) { 136 if (!sigd) {
135 error = -EUNATCH; 137 error = -EUNATCH;
136 goto out; 138 goto out;
137 } 139 }
138 if (!sk->sk_err) 140 if (!sk->sk_err)
139 set_bit(ATM_VF_BOUND,&vcc->flags); 141 set_bit(ATM_VF_BOUND, &vcc->flags);
140 error = -sk->sk_err; 142 error = -sk->sk_err;
141out: 143out:
142 release_sock(sk); 144 release_sock(sk);
143 return error; 145 return error;
144} 146}
145 147
146 148static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
147static int svc_connect(struct socket *sock,struct sockaddr *sockaddr, 149 int sockaddr_len, int flags)
148 int sockaddr_len,int flags)
149{ 150{
150 DEFINE_WAIT(wait); 151 DEFINE_WAIT(wait);
151 struct sock *sk = sock->sk; 152 struct sock *sk = sock->sk;
@@ -153,7 +154,7 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
153 struct atm_vcc *vcc = ATM_SD(sock); 154 struct atm_vcc *vcc = ATM_SD(sock);
154 int error; 155 int error;
155 156
156 pr_debug("svc_connect %p\n",vcc); 157 pr_debug("%p\n", vcc);
157 lock_sock(sk); 158 lock_sock(sk);
158 if (sockaddr_len != sizeof(struct sockaddr_atmsvc)) { 159 if (sockaddr_len != sizeof(struct sockaddr_atmsvc)) {
159 error = -EINVAL; 160 error = -EINVAL;
@@ -201,7 +202,7 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
201 vcc->remote = *addr; 202 vcc->remote = *addr;
202 set_bit(ATM_VF_WAITING, &vcc->flags); 203 set_bit(ATM_VF_WAITING, &vcc->flags);
203 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 204 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
204 sigd_enq(vcc,as_connect,NULL,NULL,&vcc->remote); 205 sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote);
205 if (flags & O_NONBLOCK) { 206 if (flags & O_NONBLOCK) {
206 finish_wait(sk->sk_sleep, &wait); 207 finish_wait(sk->sk_sleep, &wait);
207 sock->state = SS_CONNECTING; 208 sock->state = SS_CONNECTING;
@@ -212,7 +213,8 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
212 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 213 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
213 schedule(); 214 schedule();
214 if (!signal_pending(current)) { 215 if (!signal_pending(current)) {
215 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 216 prepare_to_wait(sk->sk_sleep, &wait,
217 TASK_INTERRUPTIBLE);
216 continue; 218 continue;
217 } 219 }
218 pr_debug("*ABORT*\n"); 220 pr_debug("*ABORT*\n");
@@ -228,20 +230,22 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
228 * Kernel <--okay---- Demon 230 * Kernel <--okay---- Demon
229 * Kernel <--close--- Demon 231 * Kernel <--close--- Demon
230 */ 232 */
231 sigd_enq(vcc,as_close,NULL,NULL,NULL); 233 sigd_enq(vcc, as_close, NULL, NULL, NULL);
232 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 234 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
233 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 235 prepare_to_wait(sk->sk_sleep, &wait,
236 TASK_INTERRUPTIBLE);
234 schedule(); 237 schedule();
235 } 238 }
236 if (!sk->sk_err) 239 if (!sk->sk_err)
237 while (!test_bit(ATM_VF_RELEASED,&vcc->flags) 240 while (!test_bit(ATM_VF_RELEASED, &vcc->flags) &&
238 && sigd) { 241 sigd) {
239 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 242 prepare_to_wait(sk->sk_sleep, &wait,
243 TASK_INTERRUPTIBLE);
240 schedule(); 244 schedule();
241 } 245 }
242 clear_bit(ATM_VF_REGIS,&vcc->flags); 246 clear_bit(ATM_VF_REGIS, &vcc->flags);
243 clear_bit(ATM_VF_RELEASED,&vcc->flags); 247 clear_bit(ATM_VF_RELEASED, &vcc->flags);
244 clear_bit(ATM_VF_CLOSE,&vcc->flags); 248 clear_bit(ATM_VF_CLOSE, &vcc->flags);
245 /* we're gone now but may connect later */ 249 /* we're gone now but may connect later */
246 error = -EINTR; 250 error = -EINTR;
247 break; 251 break;
@@ -269,37 +273,37 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
269/* 273/*
270 * #endif 274 * #endif
271 */ 275 */
272 if (!(error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci))) 276 error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci);
277 if (!error)
273 sock->state = SS_CONNECTED; 278 sock->state = SS_CONNECTED;
274 else 279 else
275 (void) svc_disconnect(vcc); 280 (void)svc_disconnect(vcc);
276out: 281out:
277 release_sock(sk); 282 release_sock(sk);
278 return error; 283 return error;
279} 284}
280 285
281 286static int svc_listen(struct socket *sock, int backlog)
282static int svc_listen(struct socket *sock,int backlog)
283{ 287{
284 DEFINE_WAIT(wait); 288 DEFINE_WAIT(wait);
285 struct sock *sk = sock->sk; 289 struct sock *sk = sock->sk;
286 struct atm_vcc *vcc = ATM_SD(sock); 290 struct atm_vcc *vcc = ATM_SD(sock);
287 int error; 291 int error;
288 292
289 pr_debug("svc_listen %p\n",vcc); 293 pr_debug("%p\n", vcc);
290 lock_sock(sk); 294 lock_sock(sk);
291 /* let server handle listen on unbound sockets */ 295 /* let server handle listen on unbound sockets */
292 if (test_bit(ATM_VF_SESSION,&vcc->flags)) { 296 if (test_bit(ATM_VF_SESSION, &vcc->flags)) {
293 error = -EINVAL; 297 error = -EINVAL;
294 goto out; 298 goto out;
295 } 299 }
296 if (test_bit(ATM_VF_LISTEN, &vcc->flags)) { 300 if (test_bit(ATM_VF_LISTEN, &vcc->flags)) {
297 error = -EADDRINUSE; 301 error = -EADDRINUSE;
298 goto out; 302 goto out;
299 } 303 }
300 set_bit(ATM_VF_WAITING, &vcc->flags); 304 set_bit(ATM_VF_WAITING, &vcc->flags);
301 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 305 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
302 sigd_enq(vcc,as_listen,NULL,NULL,&vcc->local); 306 sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local);
303 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 307 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
304 schedule(); 308 schedule();
305 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 309 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
@@ -309,7 +313,7 @@ static int svc_listen(struct socket *sock,int backlog)
309 error = -EUNATCH; 313 error = -EUNATCH;
310 goto out; 314 goto out;
311 } 315 }
312 set_bit(ATM_VF_LISTEN,&vcc->flags); 316 set_bit(ATM_VF_LISTEN, &vcc->flags);
313 vcc_insert_socket(sk); 317 vcc_insert_socket(sk);
314 sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT; 318 sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
315 error = -sk->sk_err; 319 error = -sk->sk_err;
@@ -318,8 +322,7 @@ out:
318 return error; 322 return error;
319} 323}
320 324
321 325static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
322static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
323{ 326{
324 struct sock *sk = sock->sk; 327 struct sock *sk = sock->sk;
325 struct sk_buff *skb; 328 struct sk_buff *skb;
@@ -336,15 +339,16 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
336 339
337 new_vcc = ATM_SD(newsock); 340 new_vcc = ATM_SD(newsock);
338 341
339 pr_debug("svc_accept %p -> %p\n",old_vcc,new_vcc); 342 pr_debug("%p -> %p\n", old_vcc, new_vcc);
340 while (1) { 343 while (1) {
341 DEFINE_WAIT(wait); 344 DEFINE_WAIT(wait);
342 345
343 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 346 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
344 while (!(skb = skb_dequeue(&sk->sk_receive_queue)) && 347 while (!(skb = skb_dequeue(&sk->sk_receive_queue)) &&
345 sigd) { 348 sigd) {
346 if (test_bit(ATM_VF_RELEASED,&old_vcc->flags)) break; 349 if (test_bit(ATM_VF_RELEASED, &old_vcc->flags))
347 if (test_bit(ATM_VF_CLOSE,&old_vcc->flags)) { 350 break;
351 if (test_bit(ATM_VF_CLOSE, &old_vcc->flags)) {
348 error = -sk->sk_err; 352 error = -sk->sk_err;
349 break; 353 break;
350 } 354 }
@@ -359,7 +363,8 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
359 error = -ERESTARTSYS; 363 error = -ERESTARTSYS;
360 break; 364 break;
361 } 365 }
362 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 366 prepare_to_wait(sk->sk_sleep, &wait,
367 TASK_INTERRUPTIBLE);
363 } 368 }
364 finish_wait(sk->sk_sleep, &wait); 369 finish_wait(sk->sk_sleep, &wait);
365 if (error) 370 if (error)
@@ -368,31 +373,34 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
368 error = -EUNATCH; 373 error = -EUNATCH;
369 goto out; 374 goto out;
370 } 375 }
371 msg = (struct atmsvc_msg *) skb->data; 376 msg = (struct atmsvc_msg *)skb->data;
372 new_vcc->qos = msg->qos; 377 new_vcc->qos = msg->qos;
373 set_bit(ATM_VF_HASQOS,&new_vcc->flags); 378 set_bit(ATM_VF_HASQOS, &new_vcc->flags);
374 new_vcc->remote = msg->svc; 379 new_vcc->remote = msg->svc;
375 new_vcc->local = msg->local; 380 new_vcc->local = msg->local;
376 new_vcc->sap = msg->sap; 381 new_vcc->sap = msg->sap;
377 error = vcc_connect(newsock, msg->pvc.sap_addr.itf, 382 error = vcc_connect(newsock, msg->pvc.sap_addr.itf,
378 msg->pvc.sap_addr.vpi, msg->pvc.sap_addr.vci); 383 msg->pvc.sap_addr.vpi,
384 msg->pvc.sap_addr.vci);
379 dev_kfree_skb(skb); 385 dev_kfree_skb(skb);
380 sk->sk_ack_backlog--; 386 sk->sk_ack_backlog--;
381 if (error) { 387 if (error) {
382 sigd_enq2(NULL,as_reject,old_vcc,NULL,NULL, 388 sigd_enq2(NULL, as_reject, old_vcc, NULL, NULL,
383 &old_vcc->qos,error); 389 &old_vcc->qos, error);
384 error = error == -EAGAIN ? -EBUSY : error; 390 error = error == -EAGAIN ? -EBUSY : error;
385 goto out; 391 goto out;
386 } 392 }
387 /* wait should be short, so we ignore the non-blocking flag */ 393 /* wait should be short, so we ignore the non-blocking flag */
388 set_bit(ATM_VF_WAITING, &new_vcc->flags); 394 set_bit(ATM_VF_WAITING, &new_vcc->flags);
389 prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 395 prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait,
390 sigd_enq(new_vcc,as_accept,old_vcc,NULL,NULL); 396 TASK_UNINTERRUPTIBLE);
397 sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL);
391 while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) { 398 while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) {
392 release_sock(sk); 399 release_sock(sk);
393 schedule(); 400 schedule();
394 lock_sock(sk); 401 lock_sock(sk);
395 prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 402 prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait,
403 TASK_UNINTERRUPTIBLE);
396 } 404 }
397 finish_wait(sk_atm(new_vcc)->sk_sleep, &wait); 405 finish_wait(sk_atm(new_vcc)->sk_sleep, &wait);
398 if (!sigd) { 406 if (!sigd) {
@@ -412,39 +420,37 @@ out:
412 return error; 420 return error;
413} 421}
414 422
415 423static int svc_getname(struct socket *sock, struct sockaddr *sockaddr,
416static int svc_getname(struct socket *sock,struct sockaddr *sockaddr, 424 int *sockaddr_len, int peer)
417 int *sockaddr_len,int peer)
418{ 425{
419 struct sockaddr_atmsvc *addr; 426 struct sockaddr_atmsvc *addr;
420 427
421 *sockaddr_len = sizeof(struct sockaddr_atmsvc); 428 *sockaddr_len = sizeof(struct sockaddr_atmsvc);
422 addr = (struct sockaddr_atmsvc *) sockaddr; 429 addr = (struct sockaddr_atmsvc *) sockaddr;
423 memcpy(addr,peer ? &ATM_SD(sock)->remote : &ATM_SD(sock)->local, 430 memcpy(addr, peer ? &ATM_SD(sock)->remote : &ATM_SD(sock)->local,
424 sizeof(struct sockaddr_atmsvc)); 431 sizeof(struct sockaddr_atmsvc));
425 return 0; 432 return 0;
426} 433}
427 434
428 435int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
429int svc_change_qos(struct atm_vcc *vcc,struct atm_qos *qos)
430{ 436{
431 struct sock *sk = sk_atm(vcc); 437 struct sock *sk = sk_atm(vcc);
432 DEFINE_WAIT(wait); 438 DEFINE_WAIT(wait);
433 439
434 set_bit(ATM_VF_WAITING, &vcc->flags); 440 set_bit(ATM_VF_WAITING, &vcc->flags);
435 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 441 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
436 sigd_enq2(vcc,as_modify,NULL,NULL,&vcc->local,qos,0); 442 sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0);
437 while (test_bit(ATM_VF_WAITING, &vcc->flags) && 443 while (test_bit(ATM_VF_WAITING, &vcc->flags) &&
438 !test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) { 444 !test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
439 schedule(); 445 schedule();
440 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 446 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
441 } 447 }
442 finish_wait(sk->sk_sleep, &wait); 448 finish_wait(sk->sk_sleep, &wait);
443 if (!sigd) return -EUNATCH; 449 if (!sigd)
450 return -EUNATCH;
444 return -sk->sk_err; 451 return -sk->sk_err;
445} 452}
446 453
447
448static int svc_setsockopt(struct socket *sock, int level, int optname, 454static int svc_setsockopt(struct socket *sock, int level, int optname,
449 char __user *optval, unsigned int optlen) 455 char __user *optval, unsigned int optlen)
450{ 456{
@@ -454,37 +460,35 @@ static int svc_setsockopt(struct socket *sock, int level, int optname,
454 460
455 lock_sock(sk); 461 lock_sock(sk);
456 switch (optname) { 462 switch (optname) {
457 case SO_ATMSAP: 463 case SO_ATMSAP:
458 if (level != SOL_ATM || optlen != sizeof(struct atm_sap)) { 464 if (level != SOL_ATM || optlen != sizeof(struct atm_sap)) {
459 error = -EINVAL; 465 error = -EINVAL;
460 goto out; 466 goto out;
461 } 467 }
462 if (copy_from_user(&vcc->sap, optval, optlen)) { 468 if (copy_from_user(&vcc->sap, optval, optlen)) {
463 error = -EFAULT; 469 error = -EFAULT;
464 goto out; 470 goto out;
465 } 471 }
466 set_bit(ATM_VF_HASSAP, &vcc->flags); 472 set_bit(ATM_VF_HASSAP, &vcc->flags);
467 break; 473 break;
468 case SO_MULTIPOINT: 474 case SO_MULTIPOINT:
469 if (level != SOL_ATM || optlen != sizeof(int)) { 475 if (level != SOL_ATM || optlen != sizeof(int)) {
470 error = -EINVAL; 476 error = -EINVAL;
471 goto out; 477 goto out;
472 } 478 }
473 if (get_user(value, (int __user *) optval)) { 479 if (get_user(value, (int __user *)optval)) {
474 error = -EFAULT; 480 error = -EFAULT;
475 goto out; 481 goto out;
476 } 482 }
477 if (value == 1) { 483 if (value == 1)
478 set_bit(ATM_VF_SESSION, &vcc->flags); 484 set_bit(ATM_VF_SESSION, &vcc->flags);
479 } else if (value == 0) { 485 else if (value == 0)
480 clear_bit(ATM_VF_SESSION, &vcc->flags); 486 clear_bit(ATM_VF_SESSION, &vcc->flags);
481 } else { 487 else
482 error = -EINVAL; 488 error = -EINVAL;
483 } 489 break;
484 break; 490 default:
485 default: 491 error = vcc_setsockopt(sock, level, optname, optval, optlen);
486 error = vcc_setsockopt(sock, level, optname,
487 optval, optlen);
488 } 492 }
489 493
490out: 494out:
@@ -492,9 +496,8 @@ out:
492 return error; 496 return error;
493} 497}
494 498
495 499static int svc_getsockopt(struct socket *sock, int level, int optname,
496static int svc_getsockopt(struct socket *sock,int level,int optname, 500 char __user *optval, int __user *optlen)
497 char __user *optval,int __user *optlen)
498{ 501{
499 struct sock *sk = sock->sk; 502 struct sock *sk = sock->sk;
500 int error = 0, len; 503 int error = 0, len;
@@ -521,7 +524,6 @@ out:
521 return error; 524 return error;
522} 525}
523 526
524
525static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr, 527static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
526 int sockaddr_len, int flags) 528 int sockaddr_len, int flags)
527{ 529{
@@ -540,7 +542,7 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
540 error = -EINPROGRESS; 542 error = -EINPROGRESS;
541 goto out; 543 goto out;
542 } 544 }
543 pr_debug("svc_addparty added wait queue\n"); 545 pr_debug("added wait queue\n");
544 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 546 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
545 schedule(); 547 schedule();
546 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 548 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
@@ -552,7 +554,6 @@ out:
552 return error; 554 return error;
553} 555}
554 556
555
556static int svc_dropparty(struct socket *sock, int ep_ref) 557static int svc_dropparty(struct socket *sock, int ep_ref)
557{ 558{
558 DEFINE_WAIT(wait); 559 DEFINE_WAIT(wait);
@@ -579,7 +580,6 @@ out:
579 return error; 580 return error;
580} 581}
581 582
582
583static int svc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 583static int svc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
584{ 584{
585 int error, ep_ref; 585 int error, ep_ref;
@@ -587,29 +587,31 @@ static int svc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
587 struct atm_vcc *vcc = ATM_SD(sock); 587 struct atm_vcc *vcc = ATM_SD(sock);
588 588
589 switch (cmd) { 589 switch (cmd) {
590 case ATM_ADDPARTY: 590 case ATM_ADDPARTY:
591 if (!test_bit(ATM_VF_SESSION, &vcc->flags)) 591 if (!test_bit(ATM_VF_SESSION, &vcc->flags))
592 return -EINVAL; 592 return -EINVAL;
593 if (copy_from_user(&sa, (void __user *) arg, sizeof(sa))) 593 if (copy_from_user(&sa, (void __user *) arg, sizeof(sa)))
594 return -EFAULT; 594 return -EFAULT;
595 error = svc_addparty(sock, (struct sockaddr *) &sa, sizeof(sa), 0); 595 error = svc_addparty(sock, (struct sockaddr *)&sa, sizeof(sa),
596 break; 596 0);
597 case ATM_DROPPARTY: 597 break;
598 if (!test_bit(ATM_VF_SESSION, &vcc->flags)) 598 case ATM_DROPPARTY:
599 return -EINVAL; 599 if (!test_bit(ATM_VF_SESSION, &vcc->flags))
600 if (copy_from_user(&ep_ref, (void __user *) arg, sizeof(int))) 600 return -EINVAL;
601 return -EFAULT; 601 if (copy_from_user(&ep_ref, (void __user *) arg, sizeof(int)))
602 error = svc_dropparty(sock, ep_ref); 602 return -EFAULT;
603 break; 603 error = svc_dropparty(sock, ep_ref);
604 default: 604 break;
605 error = vcc_ioctl(sock, cmd, arg); 605 default:
606 error = vcc_ioctl(sock, cmd, arg);
606 } 607 }
607 608
608 return error; 609 return error;
609} 610}
610 611
611#ifdef CONFIG_COMPAT 612#ifdef CONFIG_COMPAT
612static int svc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 613static int svc_compat_ioctl(struct socket *sock, unsigned int cmd,
614 unsigned long arg)
613{ 615{
614 /* The definition of ATM_ADDPARTY uses the size of struct atm_iobuf. 616 /* The definition of ATM_ADDPARTY uses the size of struct atm_iobuf.
615 But actually it takes a struct sockaddr_atmsvc, which doesn't need 617 But actually it takes a struct sockaddr_atmsvc, which doesn't need
@@ -660,13 +662,13 @@ static int svc_create(struct net *net, struct socket *sock, int protocol,
660 662
661 sock->ops = &svc_proto_ops; 663 sock->ops = &svc_proto_ops;
662 error = vcc_create(net, sock, protocol, AF_ATMSVC); 664 error = vcc_create(net, sock, protocol, AF_ATMSVC);
663 if (error) return error; 665 if (error)
666 return error;
664 ATM_SD(sock)->local.sas_family = AF_ATMSVC; 667 ATM_SD(sock)->local.sas_family = AF_ATMSVC;
665 ATM_SD(sock)->remote.sas_family = AF_ATMSVC; 668 ATM_SD(sock)->remote.sas_family = AF_ATMSVC;
666 return 0; 669 return 0;
667} 670}
668 671
669
670static const struct net_proto_family svc_family_ops = { 672static const struct net_proto_family svc_family_ops = {
671 .family = PF_ATMSVC, 673 .family = PF_ATMSVC,
672 .create = svc_create, 674 .create = svc_create,
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 5588ba69c468..a5beedf43e2d 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1863,25 +1863,13 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1863static void *ax25_info_start(struct seq_file *seq, loff_t *pos) 1863static void *ax25_info_start(struct seq_file *seq, loff_t *pos)
1864 __acquires(ax25_list_lock) 1864 __acquires(ax25_list_lock)
1865{ 1865{
1866 struct ax25_cb *ax25;
1867 struct hlist_node *node;
1868 int i = 0;
1869
1870 spin_lock_bh(&ax25_list_lock); 1866 spin_lock_bh(&ax25_list_lock);
1871 ax25_for_each(ax25, node, &ax25_list) { 1867 return seq_hlist_start(&ax25_list, *pos);
1872 if (i == *pos)
1873 return ax25;
1874 ++i;
1875 }
1876 return NULL;
1877} 1868}
1878 1869
1879static void *ax25_info_next(struct seq_file *seq, void *v, loff_t *pos) 1870static void *ax25_info_next(struct seq_file *seq, void *v, loff_t *pos)
1880{ 1871{
1881 ++*pos; 1872 return seq_hlist_next(v, &ax25_list, pos);
1882
1883 return hlist_entry( ((struct ax25_cb *)v)->ax25_node.next,
1884 struct ax25_cb, ax25_node);
1885} 1873}
1886 1874
1887static void ax25_info_stop(struct seq_file *seq, void *v) 1875static void ax25_info_stop(struct seq_file *seq, void *v)
@@ -1892,7 +1880,7 @@ static void ax25_info_stop(struct seq_file *seq, void *v)
1892 1880
1893static int ax25_info_show(struct seq_file *seq, void *v) 1881static int ax25_info_show(struct seq_file *seq, void *v)
1894{ 1882{
1895 ax25_cb *ax25 = v; 1883 ax25_cb *ax25 = hlist_entry(v, struct ax25_cb, ax25_node);
1896 char buf[11]; 1884 char buf[11];
1897 int k; 1885 int k;
1898 1886
diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
index bf706f83a5c9..14912600ec57 100644
--- a/net/ax25/ax25_out.c
+++ b/net/ax25/ax25_out.c
@@ -92,6 +92,12 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax2
92#endif 92#endif
93 } 93 }
94 94
95 /*
96 * There is one ref for the state machine; a caller needs
97 * one more to put it back, just like with the existing one.
98 */
99 ax25_cb_hold(ax25);
100
95 ax25_cb_add(ax25); 101 ax25_cb_add(ax25);
96 102
97 ax25->state = AX25_STATE_1; 103 ax25->state = AX25_STATE_1;
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index 832bcf092a01..9f13f6eefcba 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -146,31 +146,13 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
146static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos) 146static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
147 __acquires(ax25_uid_lock) 147 __acquires(ax25_uid_lock)
148{ 148{
149 struct ax25_uid_assoc *pt;
150 struct hlist_node *node;
151 int i = 1;
152
153 read_lock(&ax25_uid_lock); 149 read_lock(&ax25_uid_lock);
154 150 return seq_hlist_start_head(&ax25_uid_list, *pos);
155 if (*pos == 0)
156 return SEQ_START_TOKEN;
157
158 ax25_uid_for_each(pt, node, &ax25_uid_list) {
159 if (i == *pos)
160 return pt;
161 ++i;
162 }
163 return NULL;
164} 151}
165 152
166static void *ax25_uid_seq_next(struct seq_file *seq, void *v, loff_t *pos) 153static void *ax25_uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
167{ 154{
168 ++*pos; 155 return seq_hlist_next(v, &ax25_uid_list, pos);
169 if (v == SEQ_START_TOKEN)
170 return ax25_uid_list.first;
171 else
172 return hlist_entry(((ax25_uid_assoc *)v)->uid_node.next,
173 ax25_uid_assoc, uid_node);
174} 156}
175 157
176static void ax25_uid_seq_stop(struct seq_file *seq, void *v) 158static void ax25_uid_seq_stop(struct seq_file *seq, void *v)
@@ -186,8 +168,9 @@ static int ax25_uid_seq_show(struct seq_file *seq, void *v)
186 if (v == SEQ_START_TOKEN) 168 if (v == SEQ_START_TOKEN)
187 seq_printf(seq, "Policy: %d\n", ax25_uid_policy); 169 seq_printf(seq, "Policy: %d\n", ax25_uid_policy);
188 else { 170 else {
189 struct ax25_uid_assoc *pt = v; 171 struct ax25_uid_assoc *pt;
190 172
173 pt = hlist_entry(v, struct ax25_uid_assoc, uid_node);
191 seq_printf(seq, "%6d %s\n", pt->uid, ax2asc(buf, &pt->call)); 174 seq_printf(seq, "%6d %s\n", pt->uid, ax2asc(buf, &pt->call));
192 } 175 }
193 return 0; 176 return 0;
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 26fb831ef7e0..b6234b73c4cf 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -64,7 +64,7 @@ static void bnep_net_set_mc_list(struct net_device *dev)
64 struct sk_buff *skb; 64 struct sk_buff *skb;
65 int size; 65 int size;
66 66
67 BT_DBG("%s mc_count %d", dev->name, dev->mc_count); 67 BT_DBG("%s mc_count %d", dev->name, netdev_mc_count(dev));
68 68
69 size = sizeof(*r) + (BNEP_MAX_MULTICAST_FILTERS + 1) * ETH_ALEN * 2; 69 size = sizeof(*r) + (BNEP_MAX_MULTICAST_FILTERS + 1) * ETH_ALEN * 2;
70 skb = alloc_skb(size, GFP_ATOMIC); 70 skb = alloc_skb(size, GFP_ATOMIC);
@@ -97,7 +97,9 @@ static void bnep_net_set_mc_list(struct net_device *dev)
97 97
98 /* FIXME: We should group addresses here. */ 98 /* FIXME: We should group addresses here. */
99 99
100 for (i = 0; i < dev->mc_count && i < BNEP_MAX_MULTICAST_FILTERS; i++) { 100 for (i = 0;
101 i < netdev_mc_count(dev) && i < BNEP_MAX_MULTICAST_FILTERS;
102 i++) {
101 memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); 103 memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN);
102 memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); 104 memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN);
103 dmi = dmi->next; 105 dmi = dmi->next;
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 97f8d68d574d..3487cfe74aec 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -21,7 +21,8 @@
21*/ 21*/
22 22
23#include <linux/module.h> 23#include <linux/module.h>
24 24#include <linux/proc_fs.h>
25#include <linux/seq_file.h>
25#include <linux/types.h> 26#include <linux/types.h>
26#include <linux/errno.h> 27#include <linux/errno.h>
27#include <linux/kernel.h> 28#include <linux/kernel.h>
@@ -516,33 +517,37 @@ static char *cmtp_procinfo(struct capi_ctr *ctrl)
516 return "CAPI Message Transport Protocol"; 517 return "CAPI Message Transport Protocol";
517} 518}
518 519
519static int cmtp_ctr_read_proc(char *page, char **start, off_t off, int count, int *eof, struct capi_ctr *ctrl) 520static int cmtp_proc_show(struct seq_file *m, void *v)
520{ 521{
522 struct capi_ctr *ctrl = m->private;
521 struct cmtp_session *session = ctrl->driverdata; 523 struct cmtp_session *session = ctrl->driverdata;
522 struct cmtp_application *app; 524 struct cmtp_application *app;
523 struct list_head *p, *n; 525 struct list_head *p, *n;
524 int len = 0;
525 526
526 len += sprintf(page + len, "%s\n\n", cmtp_procinfo(ctrl)); 527 seq_printf(m, "%s\n\n", cmtp_procinfo(ctrl));
527 len += sprintf(page + len, "addr %s\n", session->name); 528 seq_printf(m, "addr %s\n", session->name);
528 len += sprintf(page + len, "ctrl %d\n", session->num); 529 seq_printf(m, "ctrl %d\n", session->num);
529 530
530 list_for_each_safe(p, n, &session->applications) { 531 list_for_each_safe(p, n, &session->applications) {
531 app = list_entry(p, struct cmtp_application, list); 532 app = list_entry(p, struct cmtp_application, list);
532 len += sprintf(page + len, "appl %d -> %d\n", app->appl, app->mapping); 533 seq_printf(m, "appl %d -> %d\n", app->appl, app->mapping);
533 } 534 }
534 535
535 if (off + count >= len) 536 return 0;
536 *eof = 1; 537}
537
538 if (len < off)
539 return 0;
540
541 *start = page + off;
542 538
543 return ((count < len - off) ? count : len - off); 539static int cmtp_proc_open(struct inode *inode, struct file *file)
540{
541 return single_open(file, cmtp_proc_show, PDE(inode)->data);
544} 542}
545 543
544static const struct file_operations cmtp_proc_fops = {
545 .owner = THIS_MODULE,
546 .open = cmtp_proc_open,
547 .read = seq_read,
548 .llseek = seq_lseek,
549 .release = single_release,
550};
546 551
547int cmtp_attach_device(struct cmtp_session *session) 552int cmtp_attach_device(struct cmtp_session *session)
548{ 553{
@@ -582,7 +587,7 @@ int cmtp_attach_device(struct cmtp_session *session)
582 session->ctrl.send_message = cmtp_send_message; 587 session->ctrl.send_message = cmtp_send_message;
583 588
584 session->ctrl.procinfo = cmtp_procinfo; 589 session->ctrl.procinfo = cmtp_procinfo;
585 session->ctrl.ctr_read_proc = cmtp_ctr_read_proc; 590 session->ctrl.proc_fops = &cmtp_proc_fops;
586 591
587 if (attach_capi_ctr(&session->ctrl) < 0) { 592 if (attach_capi_ctr(&session->ctrl) < 0) {
588 BT_ERR("Can't attach new controller"); 593 BT_ERR("Can't attach new controller");
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b7c4224f4e7d..b10e3cdb08f8 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -377,6 +377,9 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
377 377
378 if (acl->state == BT_CONNECTED && 378 if (acl->state == BT_CONNECTED &&
379 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { 379 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
380 acl->power_save = 1;
381 hci_conn_enter_active_mode(acl);
382
380 if (lmp_esco_capable(hdev)) 383 if (lmp_esco_capable(hdev))
381 hci_setup_sync(sco, acl->handle); 384 hci_setup_sync(sco, acl->handle);
382 else 385 else
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 94ba34982021..4ad23192c7a5 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -491,6 +491,10 @@ int hci_dev_open(__u16 dev)
491 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) 491 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
492 set_bit(HCI_RAW, &hdev->flags); 492 set_bit(HCI_RAW, &hdev->flags);
493 493
494 /* Treat all non BR/EDR controllers as raw devices for now */
495 if (hdev->dev_type != HCI_BREDR)
496 set_bit(HCI_RAW, &hdev->flags);
497
494 if (hdev->open(hdev)) { 498 if (hdev->open(hdev)) {
495 ret = -EIO; 499 ret = -EIO;
496 goto done; 500 goto done;
@@ -797,7 +801,7 @@ int hci_get_dev_info(void __user *arg)
797 801
798 strcpy(di.name, hdev->name); 802 strcpy(di.name, hdev->name);
799 di.bdaddr = hdev->bdaddr; 803 di.bdaddr = hdev->bdaddr;
800 di.type = hdev->type; 804 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
801 di.flags = hdev->flags; 805 di.flags = hdev->flags;
802 di.pkt_type = hdev->pkt_type; 806 di.pkt_type = hdev->pkt_type;
803 di.acl_mtu = hdev->acl_mtu; 807 di.acl_mtu = hdev->acl_mtu;
@@ -869,8 +873,8 @@ int hci_register_dev(struct hci_dev *hdev)
869 struct list_head *head = &hci_dev_list, *p; 873 struct list_head *head = &hci_dev_list, *p;
870 int i, id = 0; 874 int i, id = 0;
871 875
872 BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, 876 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
873 hdev->type, hdev->owner); 877 hdev->bus, hdev->owner);
874 878
875 if (!hdev->open || !hdev->close || !hdev->destruct) 879 if (!hdev->open || !hdev->close || !hdev->destruct)
876 return -EINVAL; 880 return -EINVAL;
@@ -946,7 +950,7 @@ int hci_unregister_dev(struct hci_dev *hdev)
946{ 950{
947 int i; 951 int i;
948 952
949 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); 953 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
950 954
951 write_lock_bh(&hci_dev_list_lock); 955 write_lock_bh(&hci_dev_list_lock);
952 list_del(&hdev->list); 956 list_del(&hdev->list);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 28517bad796c..6c57fc71c7e2 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1698,7 +1698,9 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
1698 hci_conn_add_sysfs(conn); 1698 hci_conn_add_sysfs(conn);
1699 break; 1699 break;
1700 1700
1701 case 0x11: /* Unsupported Feature or Parameter Value */
1701 case 0x1c: /* SCO interval rejected */ 1702 case 0x1c: /* SCO interval rejected */
1703 case 0x1a: /* Unsupported Remote Feature */
1702 case 0x1f: /* Unspecified error */ 1704 case 0x1f: /* Unspecified error */
1703 if (conn->out && conn->attempt < 2) { 1705 if (conn->out && conn->attempt < 2) {
1704 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | 1706 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 688cfebfbee0..38f08f6b86f6 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -329,6 +329,9 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
329 } 329 }
330 330
331 if (mask & HCI_CMSG_TSTAMP) { 331 if (mask & HCI_CMSG_TSTAMP) {
332#ifdef CONFIG_COMPAT
333 struct compat_timeval ctv;
334#endif
332 struct timeval tv; 335 struct timeval tv;
333 void *data; 336 void *data;
334 int len; 337 int len;
@@ -339,7 +342,6 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
339 len = sizeof(tv); 342 len = sizeof(tv);
340#ifdef CONFIG_COMPAT 343#ifdef CONFIG_COMPAT
341 if (msg->msg_flags & MSG_CMSG_COMPAT) { 344 if (msg->msg_flags & MSG_CMSG_COMPAT) {
342 struct compat_timeval ctv;
343 ctv.tv_sec = tv.tv_sec; 345 ctv.tv_sec = tv.tv_sec;
344 ctv.tv_usec = tv.tv_usec; 346 ctv.tv_usec = tv.tv_usec;
345 data = &ctv; 347 data = &ctv;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 2bc6f6a8de68..05fd125f74fe 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -2,12 +2,16 @@
2 2
3#include <linux/kernel.h> 3#include <linux/kernel.h>
4#include <linux/init.h> 4#include <linux/init.h>
5#include <linux/debugfs.h>
6#include <linux/seq_file.h>
5 7
6#include <net/bluetooth/bluetooth.h> 8#include <net/bluetooth/bluetooth.h>
7#include <net/bluetooth/hci_core.h> 9#include <net/bluetooth/hci_core.h>
8 10
9struct class *bt_class = NULL; 11static struct class *bt_class;
10EXPORT_SYMBOL_GPL(bt_class); 12
13struct dentry *bt_debugfs = NULL;
14EXPORT_SYMBOL_GPL(bt_debugfs);
11 15
12static struct workqueue_struct *bt_workq; 16static struct workqueue_struct *bt_workq;
13 17
@@ -166,9 +170,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
166 queue_work(bt_workq, &conn->work_del); 170 queue_work(bt_workq, &conn->work_del);
167} 171}
168 172
169static inline char *host_typetostr(int type) 173static inline char *host_bustostr(int bus)
170{ 174{
171 switch (type) { 175 switch (bus) {
172 case HCI_VIRTUAL: 176 case HCI_VIRTUAL:
173 return "VIRTUAL"; 177 return "VIRTUAL";
174 case HCI_USB: 178 case HCI_USB:
@@ -188,10 +192,28 @@ static inline char *host_typetostr(int type)
188 } 192 }
189} 193}
190 194
195static inline char *host_typetostr(int type)
196{
197 switch (type) {
198 case HCI_BREDR:
199 return "BR/EDR";
200 case HCI_80211:
201 return "802.11";
202 default:
203 return "UNKNOWN";
204 }
205}
206
207static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf)
208{
209 struct hci_dev *hdev = dev_get_drvdata(dev);
210 return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
211}
212
191static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf) 213static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
192{ 214{
193 struct hci_dev *hdev = dev_get_drvdata(dev); 215 struct hci_dev *hdev = dev_get_drvdata(dev);
194 return sprintf(buf, "%s\n", host_typetostr(hdev->type)); 216 return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type));
195} 217}
196 218
197static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) 219static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
@@ -251,32 +273,6 @@ static ssize_t show_hci_revision(struct device *dev, struct device_attribute *at
251 return sprintf(buf, "%d\n", hdev->hci_rev); 273 return sprintf(buf, "%d\n", hdev->hci_rev);
252} 274}
253 275
254static ssize_t show_inquiry_cache(struct device *dev, struct device_attribute *attr, char *buf)
255{
256 struct hci_dev *hdev = dev_get_drvdata(dev);
257 struct inquiry_cache *cache = &hdev->inq_cache;
258 struct inquiry_entry *e;
259 int n = 0;
260
261 hci_dev_lock_bh(hdev);
262
263 for (e = cache->list; e; e = e->next) {
264 struct inquiry_data *data = &e->data;
265 bdaddr_t bdaddr;
266 baswap(&bdaddr, &data->bdaddr);
267 n += sprintf(buf + n, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
268 batostr(&bdaddr),
269 data->pscan_rep_mode, data->pscan_period_mode,
270 data->pscan_mode, data->dev_class[2],
271 data->dev_class[1], data->dev_class[0],
272 __le16_to_cpu(data->clock_offset),
273 data->rssi, data->ssp_mode, e->timestamp);
274 }
275
276 hci_dev_unlock_bh(hdev);
277 return n;
278}
279
280static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf) 276static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf)
281{ 277{
282 struct hci_dev *hdev = dev_get_drvdata(dev); 278 struct hci_dev *hdev = dev_get_drvdata(dev);
@@ -355,6 +351,7 @@ static ssize_t store_sniff_min_interval(struct device *dev, struct device_attrib
355 return count; 351 return count;
356} 352}
357 353
354static DEVICE_ATTR(bus, S_IRUGO, show_bus, NULL);
358static DEVICE_ATTR(type, S_IRUGO, show_type, NULL); 355static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
359static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); 356static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
360static DEVICE_ATTR(class, S_IRUGO, show_class, NULL); 357static DEVICE_ATTR(class, S_IRUGO, show_class, NULL);
@@ -363,7 +360,6 @@ static DEVICE_ATTR(features, S_IRUGO, show_features, NULL);
363static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL); 360static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL);
364static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL); 361static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
365static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL); 362static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
366static DEVICE_ATTR(inquiry_cache, S_IRUGO, show_inquiry_cache, NULL);
367 363
368static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR, 364static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
369 show_idle_timeout, store_idle_timeout); 365 show_idle_timeout, store_idle_timeout);
@@ -373,6 +369,7 @@ static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
373 show_sniff_min_interval, store_sniff_min_interval); 369 show_sniff_min_interval, store_sniff_min_interval);
374 370
375static struct attribute *bt_host_attrs[] = { 371static struct attribute *bt_host_attrs[] = {
372 &dev_attr_bus.attr,
376 &dev_attr_type.attr, 373 &dev_attr_type.attr,
377 &dev_attr_name.attr, 374 &dev_attr_name.attr,
378 &dev_attr_class.attr, 375 &dev_attr_class.attr,
@@ -381,7 +378,6 @@ static struct attribute *bt_host_attrs[] = {
381 &dev_attr_manufacturer.attr, 378 &dev_attr_manufacturer.attr,
382 &dev_attr_hci_version.attr, 379 &dev_attr_hci_version.attr,
383 &dev_attr_hci_revision.attr, 380 &dev_attr_hci_revision.attr,
384 &dev_attr_inquiry_cache.attr,
385 &dev_attr_idle_timeout.attr, 381 &dev_attr_idle_timeout.attr,
386 &dev_attr_sniff_max_interval.attr, 382 &dev_attr_sniff_max_interval.attr,
387 &dev_attr_sniff_min_interval.attr, 383 &dev_attr_sniff_min_interval.attr,
@@ -409,12 +405,50 @@ static struct device_type bt_host = {
409 .release = bt_host_release, 405 .release = bt_host_release,
410}; 406};
411 407
408static int inquiry_cache_show(struct seq_file *f, void *p)
409{
410 struct hci_dev *hdev = f->private;
411 struct inquiry_cache *cache = &hdev->inq_cache;
412 struct inquiry_entry *e;
413
414 hci_dev_lock_bh(hdev);
415
416 for (e = cache->list; e; e = e->next) {
417 struct inquiry_data *data = &e->data;
418 bdaddr_t bdaddr;
419 baswap(&bdaddr, &data->bdaddr);
420 seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
421 batostr(&bdaddr),
422 data->pscan_rep_mode, data->pscan_period_mode,
423 data->pscan_mode, data->dev_class[2],
424 data->dev_class[1], data->dev_class[0],
425 __le16_to_cpu(data->clock_offset),
426 data->rssi, data->ssp_mode, e->timestamp);
427 }
428
429 hci_dev_unlock_bh(hdev);
430
431 return 0;
432}
433
434static int inquiry_cache_open(struct inode *inode, struct file *file)
435{
436 return single_open(file, inquiry_cache_show, inode->i_private);
437}
438
439static const struct file_operations inquiry_cache_fops = {
440 .open = inquiry_cache_open,
441 .read = seq_read,
442 .llseek = seq_lseek,
443 .release = single_release,
444};
445
412int hci_register_sysfs(struct hci_dev *hdev) 446int hci_register_sysfs(struct hci_dev *hdev)
413{ 447{
414 struct device *dev = &hdev->dev; 448 struct device *dev = &hdev->dev;
415 int err; 449 int err;
416 450
417 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); 451 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
418 452
419 dev->type = &bt_host; 453 dev->type = &bt_host;
420 dev->class = bt_class; 454 dev->class = bt_class;
@@ -428,12 +462,24 @@ int hci_register_sysfs(struct hci_dev *hdev)
428 if (err < 0) 462 if (err < 0)
429 return err; 463 return err;
430 464
465 if (!bt_debugfs)
466 return 0;
467
468 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
469 if (!hdev->debugfs)
470 return 0;
471
472 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
473 hdev, &inquiry_cache_fops);
474
431 return 0; 475 return 0;
432} 476}
433 477
434void hci_unregister_sysfs(struct hci_dev *hdev) 478void hci_unregister_sysfs(struct hci_dev *hdev)
435{ 479{
436 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); 480 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
481
482 debugfs_remove_recursive(hdev->debugfs);
437 483
438 device_del(&hdev->dev); 484 device_del(&hdev->dev);
439} 485}
@@ -444,6 +490,8 @@ int __init bt_sysfs_init(void)
444 if (!bt_workq) 490 if (!bt_workq)
445 return -ENOMEM; 491 return -ENOMEM;
446 492
493 bt_debugfs = debugfs_create_dir("bluetooth", NULL);
494
447 bt_class = class_create(THIS_MODULE, "bluetooth"); 495 bt_class = class_create(THIS_MODULE, "bluetooth");
448 if (IS_ERR(bt_class)) { 496 if (IS_ERR(bt_class)) {
449 destroy_workqueue(bt_workq); 497 destroy_workqueue(bt_workq);
@@ -455,7 +503,9 @@ int __init bt_sysfs_init(void)
455 503
456void bt_sysfs_cleanup(void) 504void bt_sysfs_cleanup(void)
457{ 505{
458 destroy_workqueue(bt_workq);
459
460 class_destroy(bt_class); 506 class_destroy(bt_class);
507
508 debugfs_remove_recursive(bt_debugfs);
509
510 destroy_workqueue(bt_workq);
461} 511}
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 569750010fd3..280529ad9274 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -243,6 +243,39 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
243 input_sync(dev); 243 input_sync(dev);
244} 244}
245 245
246static int __hidp_send_ctrl_message(struct hidp_session *session,
247 unsigned char hdr, unsigned char *data, int size)
248{
249 struct sk_buff *skb;
250
251 BT_DBG("session %p data %p size %d", session, data, size);
252
253 if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
254 BT_ERR("Can't allocate memory for new frame");
255 return -ENOMEM;
256 }
257
258 *skb_put(skb, 1) = hdr;
259 if (data && size > 0)
260 memcpy(skb_put(skb, size), data, size);
261
262 skb_queue_tail(&session->ctrl_transmit, skb);
263
264 return 0;
265}
266
267static inline int hidp_send_ctrl_message(struct hidp_session *session,
268 unsigned char hdr, unsigned char *data, int size)
269{
270 int err;
271
272 err = __hidp_send_ctrl_message(session, hdr, data, size);
273
274 hidp_schedule(session);
275
276 return err;
277}
278
246static int hidp_queue_report(struct hidp_session *session, 279static int hidp_queue_report(struct hidp_session *session,
247 unsigned char *data, int size) 280 unsigned char *data, int size)
248{ 281{
@@ -280,9 +313,22 @@ static int hidp_send_report(struct hidp_session *session, struct hid_report *rep
280 return hidp_queue_report(session, buf, rsize); 313 return hidp_queue_report(session, buf, rsize);
281} 314}
282 315
283static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count) 316static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count,
317 unsigned char report_type)
284{ 318{
285 if (hidp_queue_report(hid->driver_data, data, count)) 319 switch (report_type) {
320 case HID_FEATURE_REPORT:
321 report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE;
322 break;
323 case HID_OUTPUT_REPORT:
324 report_type = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
325 break;
326 default:
327 return -EINVAL;
328 }
329
330 if (hidp_send_ctrl_message(hid->driver_data, report_type,
331 data, count))
286 return -ENOMEM; 332 return -ENOMEM;
287 return count; 333 return count;
288} 334}
@@ -307,39 +353,6 @@ static inline void hidp_del_timer(struct hidp_session *session)
307 del_timer(&session->timer); 353 del_timer(&session->timer);
308} 354}
309 355
310static int __hidp_send_ctrl_message(struct hidp_session *session,
311 unsigned char hdr, unsigned char *data, int size)
312{
313 struct sk_buff *skb;
314
315 BT_DBG("session %p data %p size %d", session, data, size);
316
317 if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
318 BT_ERR("Can't allocate memory for new frame");
319 return -ENOMEM;
320 }
321
322 *skb_put(skb, 1) = hdr;
323 if (data && size > 0)
324 memcpy(skb_put(skb, size), data, size);
325
326 skb_queue_tail(&session->ctrl_transmit, skb);
327
328 return 0;
329}
330
331static inline int hidp_send_ctrl_message(struct hidp_session *session,
332 unsigned char hdr, unsigned char *data, int size)
333{
334 int err;
335
336 err = __hidp_send_ctrl_message(session, hdr, data, size);
337
338 hidp_schedule(session);
339
340 return err;
341}
342
343static void hidp_process_handshake(struct hidp_session *session, 356static void hidp_process_handshake(struct hidp_session *session,
344 unsigned char param) 357 unsigned char param)
345{ 358{
@@ -701,29 +714,9 @@ static void hidp_close(struct hid_device *hid)
701static int hidp_parse(struct hid_device *hid) 714static int hidp_parse(struct hid_device *hid)
702{ 715{
703 struct hidp_session *session = hid->driver_data; 716 struct hidp_session *session = hid->driver_data;
704 struct hidp_connadd_req *req = session->req;
705 unsigned char *buf;
706 int ret;
707
708 buf = kmalloc(req->rd_size, GFP_KERNEL);
709 if (!buf)
710 return -ENOMEM;
711
712 if (copy_from_user(buf, req->rd_data, req->rd_size)) {
713 kfree(buf);
714 return -EFAULT;
715 }
716
717 ret = hid_parse_report(session->hid, buf, req->rd_size);
718
719 kfree(buf);
720
721 if (ret)
722 return ret;
723
724 session->req = NULL;
725 717
726 return 0; 718 return hid_parse_report(session->hid, session->rd_data,
719 session->rd_size);
727} 720}
728 721
729static int hidp_start(struct hid_device *hid) 722static int hidp_start(struct hid_device *hid)
@@ -768,12 +761,24 @@ static int hidp_setup_hid(struct hidp_session *session,
768 bdaddr_t src, dst; 761 bdaddr_t src, dst;
769 int err; 762 int err;
770 763
764 session->rd_data = kzalloc(req->rd_size, GFP_KERNEL);
765 if (!session->rd_data)
766 return -ENOMEM;
767
768 if (copy_from_user(session->rd_data, req->rd_data, req->rd_size)) {
769 err = -EFAULT;
770 goto fault;
771 }
772 session->rd_size = req->rd_size;
773
771 hid = hid_allocate_device(); 774 hid = hid_allocate_device();
772 if (IS_ERR(hid)) 775 if (IS_ERR(hid)) {
773 return PTR_ERR(session->hid); 776 err = PTR_ERR(hid);
777 goto fault;
778 }
774 779
775 session->hid = hid; 780 session->hid = hid;
776 session->req = req; 781
777 hid->driver_data = session; 782 hid->driver_data = session;
778 783
779 baswap(&src, &bt_sk(session->ctrl_sock->sk)->src); 784 baswap(&src, &bt_sk(session->ctrl_sock->sk)->src);
@@ -804,6 +809,10 @@ failed:
804 hid_destroy_device(hid); 809 hid_destroy_device(hid);
805 session->hid = NULL; 810 session->hid = NULL;
806 811
812fault:
813 kfree(session->rd_data);
814 session->rd_data = NULL;
815
807 return err; 816 return err;
808} 817}
809 818
@@ -898,6 +907,9 @@ unlink:
898 session->hid = NULL; 907 session->hid = NULL;
899 } 908 }
900 909
910 kfree(session->rd_data);
911 session->rd_data = NULL;
912
901purge: 913purge:
902 skb_queue_purge(&session->ctrl_transmit); 914 skb_queue_purge(&session->ctrl_transmit);
903 skb_queue_purge(&session->intr_transmit); 915 skb_queue_purge(&session->intr_transmit);
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index faf3d74c3586..a4e215d50c10 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -154,7 +154,9 @@ struct hidp_session {
154 struct sk_buff_head ctrl_transmit; 154 struct sk_buff_head ctrl_transmit;
155 struct sk_buff_head intr_transmit; 155 struct sk_buff_head intr_transmit;
156 156
157 struct hidp_connadd_req *req; 157 /* Report descriptor */
158 __u8 *rd_data;
159 uint rd_size;
158}; 160};
159 161
160static inline void hidp_schedule(struct hidp_session *session) 162static inline void hidp_schedule(struct hidp_session *session)
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 5129b88c8e5b..7794a2e2adce 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -40,6 +40,8 @@
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/list.h> 41#include <linux/list.h>
42#include <linux/device.h> 42#include <linux/device.h>
43#include <linux/debugfs.h>
44#include <linux/seq_file.h>
43#include <linux/uaccess.h> 45#include <linux/uaccess.h>
44#include <linux/crc16.h> 46#include <linux/crc16.h>
45#include <net/sock.h> 47#include <net/sock.h>
@@ -1212,6 +1214,7 @@ static void l2cap_monitor_timeout(unsigned long arg)
1212 bh_lock_sock(sk); 1214 bh_lock_sock(sk);
1213 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) { 1215 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1214 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk); 1216 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1217 bh_unlock_sock(sk);
1215 return; 1218 return;
1216 } 1219 }
1217 1220
@@ -1367,7 +1370,6 @@ static int l2cap_ertm_send(struct sock *sk)
1367 1370
1368 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) && 1371 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1369 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) { 1372 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1370 tx_skb = skb_clone(skb, GFP_ATOMIC);
1371 1373
1372 if (pi->remote_max_tx && 1374 if (pi->remote_max_tx &&
1373 bt_cb(skb)->retries == pi->remote_max_tx) { 1375 bt_cb(skb)->retries == pi->remote_max_tx) {
@@ -1375,6 +1377,8 @@ static int l2cap_ertm_send(struct sock *sk)
1375 break; 1377 break;
1376 } 1378 }
1377 1379
1380 tx_skb = skb_clone(skb, GFP_ATOMIC);
1381
1378 bt_cb(skb)->retries++; 1382 bt_cb(skb)->retries++;
1379 1383
1380 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); 1384 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
@@ -2828,6 +2832,11 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2828 int len = cmd->len - sizeof(*rsp); 2832 int len = cmd->len - sizeof(*rsp);
2829 char req[64]; 2833 char req[64];
2830 2834
2835 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2836 l2cap_send_disconn_req(conn, sk);
2837 goto done;
2838 }
2839
2831 /* throw out any old stored conf requests */ 2840 /* throw out any old stored conf requests */
2832 result = L2CAP_CONF_SUCCESS; 2841 result = L2CAP_CONF_SUCCESS;
2833 len = l2cap_parse_conf_rsp(sk, rsp->data, 2842 len = l2cap_parse_conf_rsp(sk, rsp->data,
@@ -3435,8 +3444,8 @@ static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, str
3435 (pi->unacked_frames > 0)) 3444 (pi->unacked_frames > 0))
3436 __mod_retrans_timer(); 3445 __mod_retrans_timer();
3437 3446
3438 l2cap_ertm_send(sk);
3439 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3447 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3448 l2cap_ertm_send(sk);
3440 } 3449 }
3441 break; 3450 break;
3442 3451
@@ -3471,9 +3480,9 @@ static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, str
3471 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3480 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3472 3481
3473 if (rx_control & L2CAP_CTRL_POLL) { 3482 if (rx_control & L2CAP_CTRL_POLL) {
3474 l2cap_retransmit_frame(sk, tx_seq);
3475 pi->expected_ack_seq = tx_seq; 3483 pi->expected_ack_seq = tx_seq;
3476 l2cap_drop_acked_frames(sk); 3484 l2cap_drop_acked_frames(sk);
3485 l2cap_retransmit_frame(sk, tx_seq);
3477 l2cap_ertm_send(sk); 3486 l2cap_ertm_send(sk);
3478 if (pi->conn_state & L2CAP_CONN_WAIT_F) { 3487 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3479 pi->srej_save_reqseq = tx_seq; 3488 pi->srej_save_reqseq = tx_seq;
@@ -3517,7 +3526,6 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
3517 struct l2cap_pinfo *pi; 3526 struct l2cap_pinfo *pi;
3518 u16 control, len; 3527 u16 control, len;
3519 u8 tx_seq; 3528 u8 tx_seq;
3520 int err;
3521 3529
3522 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid); 3530 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3523 if (!sk) { 3531 if (!sk) {
@@ -3569,13 +3577,11 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
3569 goto drop; 3577 goto drop;
3570 3578
3571 if (__is_iframe(control)) 3579 if (__is_iframe(control))
3572 err = l2cap_data_channel_iframe(sk, control, skb); 3580 l2cap_data_channel_iframe(sk, control, skb);
3573 else 3581 else
3574 err = l2cap_data_channel_sframe(sk, control, skb); 3582 l2cap_data_channel_sframe(sk, control, skb);
3575 3583
3576 if (!err) 3584 goto done;
3577 goto done;
3578 break;
3579 3585
3580 case L2CAP_MODE_STREAMING: 3586 case L2CAP_MODE_STREAMING:
3581 control = get_unaligned_le16(skb->data); 3587 control = get_unaligned_le16(skb->data);
@@ -3601,7 +3607,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
3601 else 3607 else
3602 pi->expected_tx_seq = tx_seq + 1; 3608 pi->expected_tx_seq = tx_seq + 1;
3603 3609
3604 err = l2cap_sar_reassembly_sdu(sk, skb, control); 3610 l2cap_sar_reassembly_sdu(sk, skb, control);
3605 3611
3606 goto done; 3612 goto done;
3607 3613
@@ -3938,29 +3944,42 @@ drop:
3938 return 0; 3944 return 0;
3939} 3945}
3940 3946
3941static ssize_t l2cap_sysfs_show(struct class *dev, char *buf) 3947static int l2cap_debugfs_show(struct seq_file *f, void *p)
3942{ 3948{
3943 struct sock *sk; 3949 struct sock *sk;
3944 struct hlist_node *node; 3950 struct hlist_node *node;
3945 char *str = buf;
3946 3951
3947 read_lock_bh(&l2cap_sk_list.lock); 3952 read_lock_bh(&l2cap_sk_list.lock);
3948 3953
3949 sk_for_each(sk, node, &l2cap_sk_list.head) { 3954 sk_for_each(sk, node, &l2cap_sk_list.head) {
3950 struct l2cap_pinfo *pi = l2cap_pi(sk); 3955 struct l2cap_pinfo *pi = l2cap_pi(sk);
3951 3956
3952 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n", 3957 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3953 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), 3958 batostr(&bt_sk(sk)->src),
3954 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid, 3959 batostr(&bt_sk(sk)->dst),
3955 pi->dcid, pi->imtu, pi->omtu, pi->sec_level); 3960 sk->sk_state, __le16_to_cpu(pi->psm),
3961 pi->scid, pi->dcid,
3962 pi->imtu, pi->omtu, pi->sec_level);
3956 } 3963 }
3957 3964
3958 read_unlock_bh(&l2cap_sk_list.lock); 3965 read_unlock_bh(&l2cap_sk_list.lock);
3959 3966
3960 return str - buf; 3967 return 0;
3968}
3969
3970static int l2cap_debugfs_open(struct inode *inode, struct file *file)
3971{
3972 return single_open(file, l2cap_debugfs_show, inode->i_private);
3961} 3973}
3962 3974
3963static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL); 3975static const struct file_operations l2cap_debugfs_fops = {
3976 .open = l2cap_debugfs_open,
3977 .read = seq_read,
3978 .llseek = seq_lseek,
3979 .release = single_release,
3980};
3981
3982static struct dentry *l2cap_debugfs;
3964 3983
3965static const struct proto_ops l2cap_sock_ops = { 3984static const struct proto_ops l2cap_sock_ops = {
3966 .family = PF_BLUETOOTH, 3985 .family = PF_BLUETOOTH,
@@ -4020,8 +4039,12 @@ static int __init l2cap_init(void)
4020 goto error; 4039 goto error;
4021 } 4040 }
4022 4041
4023 if (class_create_file(bt_class, &class_attr_l2cap) < 0) 4042 if (bt_debugfs) {
4024 BT_ERR("Failed to create L2CAP info file"); 4043 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4044 bt_debugfs, NULL, &l2cap_debugfs_fops);
4045 if (!l2cap_debugfs)
4046 BT_ERR("Failed to create L2CAP debug file");
4047 }
4025 4048
4026 BT_INFO("L2CAP ver %s", VERSION); 4049 BT_INFO("L2CAP ver %s", VERSION);
4027 BT_INFO("L2CAP socket layer initialized"); 4050 BT_INFO("L2CAP socket layer initialized");
@@ -4035,7 +4058,7 @@ error:
4035 4058
4036static void __exit l2cap_exit(void) 4059static void __exit l2cap_exit(void)
4037{ 4060{
4038 class_remove_file(bt_class, &class_attr_l2cap); 4061 debugfs_remove(l2cap_debugfs);
4039 4062
4040 if (bt_sock_unregister(BTPROTO_L2CAP) < 0) 4063 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4041 BT_ERR("L2CAP socket unregistration failed"); 4064 BT_ERR("L2CAP socket unregistration failed");
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index fc5ee3296e22..13f114e8b0f9 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -33,6 +33,8 @@
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/wait.h> 34#include <linux/wait.h>
35#include <linux/device.h> 35#include <linux/device.h>
36#include <linux/debugfs.h>
37#include <linux/seq_file.h>
36#include <linux/net.h> 38#include <linux/net.h>
37#include <linux/mutex.h> 39#include <linux/mutex.h>
38#include <linux/kthread.h> 40#include <linux/kthread.h>
@@ -252,7 +254,6 @@ static void rfcomm_session_timeout(unsigned long arg)
252 BT_DBG("session %p state %ld", s, s->state); 254 BT_DBG("session %p state %ld", s, s->state);
253 255
254 set_bit(RFCOMM_TIMED_OUT, &s->flags); 256 set_bit(RFCOMM_TIMED_OUT, &s->flags);
255 rfcomm_session_put(s);
256 rfcomm_schedule(RFCOMM_SCHED_TIMEO); 257 rfcomm_schedule(RFCOMM_SCHED_TIMEO);
257} 258}
258 259
@@ -1151,7 +1152,11 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
1151 break; 1152 break;
1152 1153
1153 case BT_DISCONN: 1154 case BT_DISCONN:
1154 rfcomm_session_put(s); 1155 /* When socket is closed and we are not RFCOMM
1156 * initiator rfcomm_process_rx already calls
1157 * rfcomm_session_put() */
1158 if (s->sock->sk->sk_state != BT_CLOSED)
1159 rfcomm_session_put(s);
1155 break; 1160 break;
1156 } 1161 }
1157 } 1162 }
@@ -1920,6 +1925,7 @@ static inline void rfcomm_process_sessions(void)
1920 if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) { 1925 if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) {
1921 s->state = BT_DISCONN; 1926 s->state = BT_DISCONN;
1922 rfcomm_send_disc(s, 0); 1927 rfcomm_send_disc(s, 0);
1928 rfcomm_session_put(s);
1923 continue; 1929 continue;
1924 } 1930 }
1925 1931
@@ -2094,11 +2100,10 @@ static struct hci_cb rfcomm_cb = {
2094 .security_cfm = rfcomm_security_cfm 2100 .security_cfm = rfcomm_security_cfm
2095}; 2101};
2096 2102
2097static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, char *buf) 2103static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x)
2098{ 2104{
2099 struct rfcomm_session *s; 2105 struct rfcomm_session *s;
2100 struct list_head *pp, *p; 2106 struct list_head *pp, *p;
2101 char *str = buf;
2102 2107
2103 rfcomm_lock(); 2108 rfcomm_lock();
2104 2109
@@ -2108,18 +2113,32 @@ static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, char *buf)
2108 struct sock *sk = s->sock->sk; 2113 struct sock *sk = s->sock->sk;
2109 struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list); 2114 struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list);
2110 2115
2111 str += sprintf(str, "%s %s %ld %d %d %d %d\n", 2116 seq_printf(f, "%s %s %ld %d %d %d %d\n",
2112 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), 2117 batostr(&bt_sk(sk)->src),
2113 d->state, d->dlci, d->mtu, d->rx_credits, d->tx_credits); 2118 batostr(&bt_sk(sk)->dst),
2119 d->state, d->dlci, d->mtu,
2120 d->rx_credits, d->tx_credits);
2114 } 2121 }
2115 } 2122 }
2116 2123
2117 rfcomm_unlock(); 2124 rfcomm_unlock();
2118 2125
2119 return (str - buf); 2126 return 0;
2127}
2128
2129static int rfcomm_dlc_debugfs_open(struct inode *inode, struct file *file)
2130{
2131 return single_open(file, rfcomm_dlc_debugfs_show, inode->i_private);
2120} 2132}
2121 2133
2122static CLASS_ATTR(rfcomm_dlc, S_IRUGO, rfcomm_dlc_sysfs_show, NULL); 2134static const struct file_operations rfcomm_dlc_debugfs_fops = {
2135 .open = rfcomm_dlc_debugfs_open,
2136 .read = seq_read,
2137 .llseek = seq_lseek,
2138 .release = single_release,
2139};
2140
2141static struct dentry *rfcomm_dlc_debugfs;
2123 2142
2124/* ---- Initialization ---- */ 2143/* ---- Initialization ---- */
2125static int __init rfcomm_init(void) 2144static int __init rfcomm_init(void)
@@ -2136,8 +2155,12 @@ static int __init rfcomm_init(void)
2136 goto unregister; 2155 goto unregister;
2137 } 2156 }
2138 2157
2139 if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) 2158 if (bt_debugfs) {
2140 BT_ERR("Failed to create RFCOMM info file"); 2159 rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444,
2160 bt_debugfs, NULL, &rfcomm_dlc_debugfs_fops);
2161 if (!rfcomm_dlc_debugfs)
2162 BT_ERR("Failed to create RFCOMM debug file");
2163 }
2141 2164
2142 err = rfcomm_init_ttys(); 2165 err = rfcomm_init_ttys();
2143 if (err < 0) 2166 if (err < 0)
@@ -2165,7 +2188,7 @@ unregister:
2165 2188
2166static void __exit rfcomm_exit(void) 2189static void __exit rfcomm_exit(void)
2167{ 2190{
2168 class_remove_file(bt_class, &class_attr_rfcomm_dlc); 2191 debugfs_remove(rfcomm_dlc_debugfs);
2169 2192
2170 hci_unregister_cb(&rfcomm_cb); 2193 hci_unregister_cb(&rfcomm_cb);
2171 2194
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 4b5968dda673..7f439765403d 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -40,6 +40,8 @@
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/list.h> 41#include <linux/list.h>
42#include <linux/device.h> 42#include <linux/device.h>
43#include <linux/debugfs.h>
44#include <linux/seq_file.h>
43#include <net/sock.h> 45#include <net/sock.h>
44 46
45#include <asm/system.h> 47#include <asm/system.h>
@@ -1061,26 +1063,38 @@ done:
1061 return result; 1063 return result;
1062} 1064}
1063 1065
1064static ssize_t rfcomm_sock_sysfs_show(struct class *dev, char *buf) 1066static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
1065{ 1067{
1066 struct sock *sk; 1068 struct sock *sk;
1067 struct hlist_node *node; 1069 struct hlist_node *node;
1068 char *str = buf;
1069 1070
1070 read_lock_bh(&rfcomm_sk_list.lock); 1071 read_lock_bh(&rfcomm_sk_list.lock);
1071 1072
1072 sk_for_each(sk, node, &rfcomm_sk_list.head) { 1073 sk_for_each(sk, node, &rfcomm_sk_list.head) {
1073 str += sprintf(str, "%s %s %d %d\n", 1074 seq_printf(f, "%s %s %d %d\n",
1074 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), 1075 batostr(&bt_sk(sk)->src),
1076 batostr(&bt_sk(sk)->dst),
1075 sk->sk_state, rfcomm_pi(sk)->channel); 1077 sk->sk_state, rfcomm_pi(sk)->channel);
1076 } 1078 }
1077 1079
1078 read_unlock_bh(&rfcomm_sk_list.lock); 1080 read_unlock_bh(&rfcomm_sk_list.lock);
1079 1081
1080 return (str - buf); 1082 return 0;
1081} 1083}
1082 1084
1083static CLASS_ATTR(rfcomm, S_IRUGO, rfcomm_sock_sysfs_show, NULL); 1085static int rfcomm_sock_debugfs_open(struct inode *inode, struct file *file)
1086{
1087 return single_open(file, rfcomm_sock_debugfs_show, inode->i_private);
1088}
1089
1090static const struct file_operations rfcomm_sock_debugfs_fops = {
1091 .open = rfcomm_sock_debugfs_open,
1092 .read = seq_read,
1093 .llseek = seq_lseek,
1094 .release = single_release,
1095};
1096
1097static struct dentry *rfcomm_sock_debugfs;
1084 1098
1085static const struct proto_ops rfcomm_sock_ops = { 1099static const struct proto_ops rfcomm_sock_ops = {
1086 .family = PF_BLUETOOTH, 1100 .family = PF_BLUETOOTH,
@@ -1120,8 +1134,12 @@ int __init rfcomm_init_sockets(void)
1120 if (err < 0) 1134 if (err < 0)
1121 goto error; 1135 goto error;
1122 1136
1123 if (class_create_file(bt_class, &class_attr_rfcomm) < 0) 1137 if (bt_debugfs) {
1124 BT_ERR("Failed to create RFCOMM info file"); 1138 rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
1139 bt_debugfs, NULL, &rfcomm_sock_debugfs_fops);
1140 if (!rfcomm_sock_debugfs)
1141 BT_ERR("Failed to create RFCOMM debug file");
1142 }
1125 1143
1126 BT_INFO("RFCOMM socket layer initialized"); 1144 BT_INFO("RFCOMM socket layer initialized");
1127 1145
@@ -1135,7 +1153,7 @@ error:
1135 1153
1136void rfcomm_cleanup_sockets(void) 1154void rfcomm_cleanup_sockets(void)
1137{ 1155{
1138 class_remove_file(bt_class, &class_attr_rfcomm); 1156 debugfs_remove(rfcomm_sock_debugfs);
1139 1157
1140 if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) 1158 if (bt_sock_unregister(BTPROTO_RFCOMM) < 0)
1141 BT_ERR("RFCOMM socket layer unregistration failed"); 1159 BT_ERR("RFCOMM socket layer unregistration failed");
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index dd8f6ec57dce..e5b16b76b22e 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -38,6 +38,8 @@
38#include <linux/socket.h> 38#include <linux/socket.h>
39#include <linux/skbuff.h> 39#include <linux/skbuff.h>
40#include <linux/device.h> 40#include <linux/device.h>
41#include <linux/debugfs.h>
42#include <linux/seq_file.h>
41#include <linux/list.h> 43#include <linux/list.h>
42#include <net/sock.h> 44#include <net/sock.h>
43 45
@@ -953,26 +955,36 @@ drop:
953 return 0; 955 return 0;
954} 956}
955 957
956static ssize_t sco_sysfs_show(struct class *dev, char *buf) 958static int sco_debugfs_show(struct seq_file *f, void *p)
957{ 959{
958 struct sock *sk; 960 struct sock *sk;
959 struct hlist_node *node; 961 struct hlist_node *node;
960 char *str = buf;
961 962
962 read_lock_bh(&sco_sk_list.lock); 963 read_lock_bh(&sco_sk_list.lock);
963 964
964 sk_for_each(sk, node, &sco_sk_list.head) { 965 sk_for_each(sk, node, &sco_sk_list.head) {
965 str += sprintf(str, "%s %s %d\n", 966 seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
966 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), 967 batostr(&bt_sk(sk)->dst), sk->sk_state);
967 sk->sk_state);
968 } 968 }
969 969
970 read_unlock_bh(&sco_sk_list.lock); 970 read_unlock_bh(&sco_sk_list.lock);
971 971
972 return (str - buf); 972 return 0;
973} 973}
974 974
975static CLASS_ATTR(sco, S_IRUGO, sco_sysfs_show, NULL); 975static int sco_debugfs_open(struct inode *inode, struct file *file)
976{
977 return single_open(file, sco_debugfs_show, inode->i_private);
978}
979
980static const struct file_operations sco_debugfs_fops = {
981 .open = sco_debugfs_open,
982 .read = seq_read,
983 .llseek = seq_lseek,
984 .release = single_release,
985};
986
987static struct dentry *sco_debugfs;
976 988
977static const struct proto_ops sco_sock_ops = { 989static const struct proto_ops sco_sock_ops = {
978 .family = PF_BLUETOOTH, 990 .family = PF_BLUETOOTH,
@@ -1030,8 +1042,12 @@ static int __init sco_init(void)
1030 goto error; 1042 goto error;
1031 } 1043 }
1032 1044
1033 if (class_create_file(bt_class, &class_attr_sco) < 0) 1045 if (bt_debugfs) {
1034 BT_ERR("Failed to create SCO info file"); 1046 sco_debugfs = debugfs_create_file("sco", 0444,
1047 bt_debugfs, NULL, &sco_debugfs_fops);
1048 if (!sco_debugfs)
1049 BT_ERR("Failed to create SCO debug file");
1050 }
1035 1051
1036 BT_INFO("SCO (Voice Link) ver %s", VERSION); 1052 BT_INFO("SCO (Voice Link) ver %s", VERSION);
1037 BT_INFO("SCO socket layer initialized"); 1053 BT_INFO("SCO socket layer initialized");
@@ -1045,7 +1061,7 @@ error:
1045 1061
1046static void __exit sco_exit(void) 1062static void __exit sco_exit(void)
1047{ 1063{
1048 class_remove_file(bt_class, &class_attr_sco); 1064 debugfs_remove(sco_debugfs);
1049 1065
1050 if (bt_sock_unregister(BTPROTO_SCO) < 0) 1066 if (bt_sock_unregister(BTPROTO_SCO) < 0)
1051 BT_ERR("SCO socket unregistration failed"); 1067 BT_ERR("SCO socket unregistration failed");
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index e143ca678881..d115d5cea5b6 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -31,3 +31,17 @@ config BRIDGE
31 will be called bridge. 31 will be called bridge.
32 32
33 If unsure, say N. 33 If unsure, say N.
34
35config BRIDGE_IGMP_SNOOPING
36 bool "IGMP snooping"
37 depends on BRIDGE
38 depends on INET
39 default y
40 ---help---
41 If you say Y here, then the Ethernet bridge will be able selectively
42 forward multicast traffic based on IGMP traffic received from each
43 port.
44
45 Say N to exclude this support and reduce the binary size.
46
47 If unsure, say Y.
diff --git a/net/bridge/Makefile b/net/bridge/Makefile
index f444c12cde5a..d0359ea8ee79 100644
--- a/net/bridge/Makefile
+++ b/net/bridge/Makefile
@@ -12,4 +12,6 @@ bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o
12 12
13bridge-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o 13bridge-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o
14 14
15bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o
16
15obj-$(CONFIG_BRIDGE_NF_EBTABLES) += netfilter/ 17obj-$(CONFIG_BRIDGE_NF_EBTABLES) += netfilter/
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 1a99c4e04e85..90a9024e5c1e 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -25,6 +25,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
25 struct net_bridge *br = netdev_priv(dev); 25 struct net_bridge *br = netdev_priv(dev);
26 const unsigned char *dest = skb->data; 26 const unsigned char *dest = skb->data;
27 struct net_bridge_fdb_entry *dst; 27 struct net_bridge_fdb_entry *dst;
28 struct net_bridge_mdb_entry *mdst;
29
30 BR_INPUT_SKB_CB(skb)->brdev = dev;
28 31
29 dev->stats.tx_packets++; 32 dev->stats.tx_packets++;
30 dev->stats.tx_bytes += skb->len; 33 dev->stats.tx_bytes += skb->len;
@@ -32,13 +35,21 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
32 skb_reset_mac_header(skb); 35 skb_reset_mac_header(skb);
33 skb_pull(skb, ETH_HLEN); 36 skb_pull(skb, ETH_HLEN);
34 37
35 if (dest[0] & 1) 38 if (dest[0] & 1) {
36 br_flood_deliver(br, skb); 39 if (br_multicast_rcv(br, NULL, skb))
37 else if ((dst = __br_fdb_get(br, dest)) != NULL) 40 goto out;
41
42 mdst = br_mdb_get(br, skb);
43 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
44 br_multicast_deliver(mdst, skb);
45 else
46 br_flood_deliver(br, skb);
47 } else if ((dst = __br_fdb_get(br, dest)) != NULL)
38 br_deliver(dst->dst, skb); 48 br_deliver(dst->dst, skb);
39 else 49 else
40 br_flood_deliver(br, skb); 50 br_flood_deliver(br, skb);
41 51
52out:
42 return NETDEV_TX_OK; 53 return NETDEV_TX_OK;
43} 54}
44 55
@@ -49,6 +60,7 @@ static int br_dev_open(struct net_device *dev)
49 br_features_recompute(br); 60 br_features_recompute(br);
50 netif_start_queue(dev); 61 netif_start_queue(dev);
51 br_stp_enable_bridge(br); 62 br_stp_enable_bridge(br);
63 br_multicast_open(br);
52 64
53 return 0; 65 return 0;
54} 66}
@@ -59,7 +71,10 @@ static void br_dev_set_multicast_list(struct net_device *dev)
59 71
60static int br_dev_stop(struct net_device *dev) 72static int br_dev_stop(struct net_device *dev)
61{ 73{
62 br_stp_disable_bridge(netdev_priv(dev)); 74 struct net_bridge *br = netdev_priv(dev);
75
76 br_stp_disable_bridge(br);
77 br_multicast_stop(br);
63 78
64 netif_stop_queue(dev); 79 netif_stop_queue(dev);
65 80
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index bc1704ac6cd9..8dbec83e50ca 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -11,6 +11,7 @@
11 * 2 of the License, or (at your option) any later version. 11 * 2 of the License, or (at your option) any later version.
12 */ 12 */
13 13
14#include <linux/err.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/netdevice.h> 16#include <linux/netdevice.h>
16#include <linux/skbuff.h> 17#include <linux/skbuff.h>
@@ -18,6 +19,11 @@
18#include <linux/netfilter_bridge.h> 19#include <linux/netfilter_bridge.h>
19#include "br_private.h" 20#include "br_private.h"
20 21
22static int deliver_clone(const struct net_bridge_port *prev,
23 struct sk_buff *skb,
24 void (*__packet_hook)(const struct net_bridge_port *p,
25 struct sk_buff *skb));
26
21/* Don't forward packets to originating port or forwarding diasabled */ 27/* Don't forward packets to originating port or forwarding diasabled */
22static inline int should_deliver(const struct net_bridge_port *p, 28static inline int should_deliver(const struct net_bridge_port *p,
23 const struct sk_buff *skb) 29 const struct sk_buff *skb)
@@ -93,61 +99,167 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
93} 99}
94 100
95/* called with rcu_read_lock */ 101/* called with rcu_read_lock */
96void br_forward(const struct net_bridge_port *to, struct sk_buff *skb) 102void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
97{ 103{
98 if (should_deliver(to, skb)) { 104 if (should_deliver(to, skb)) {
99 __br_forward(to, skb); 105 if (skb0)
106 deliver_clone(to, skb, __br_forward);
107 else
108 __br_forward(to, skb);
100 return; 109 return;
101 } 110 }
102 111
103 kfree_skb(skb); 112 if (!skb0)
113 kfree_skb(skb);
104} 114}
105 115
106/* called under bridge lock */ 116static int deliver_clone(const struct net_bridge_port *prev,
107static void br_flood(struct net_bridge *br, struct sk_buff *skb, 117 struct sk_buff *skb,
118 void (*__packet_hook)(const struct net_bridge_port *p,
119 struct sk_buff *skb))
120{
121 skb = skb_clone(skb, GFP_ATOMIC);
122 if (!skb) {
123 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
124
125 dev->stats.tx_dropped++;
126 return -ENOMEM;
127 }
128
129 __packet_hook(prev, skb);
130 return 0;
131}
132
133static struct net_bridge_port *maybe_deliver(
134 struct net_bridge_port *prev, struct net_bridge_port *p,
135 struct sk_buff *skb,
108 void (*__packet_hook)(const struct net_bridge_port *p, 136 void (*__packet_hook)(const struct net_bridge_port *p,
109 struct sk_buff *skb)) 137 struct sk_buff *skb))
110{ 138{
139 int err;
140
141 if (!should_deliver(p, skb))
142 return prev;
143
144 if (!prev)
145 goto out;
146
147 err = deliver_clone(prev, skb, __packet_hook);
148 if (err)
149 return ERR_PTR(err);
150
151out:
152 return p;
153}
154
155/* called under bridge lock */
156static void br_flood(struct net_bridge *br, struct sk_buff *skb,
157 struct sk_buff *skb0,
158 void (*__packet_hook)(const struct net_bridge_port *p,
159 struct sk_buff *skb))
160{
111 struct net_bridge_port *p; 161 struct net_bridge_port *p;
112 struct net_bridge_port *prev; 162 struct net_bridge_port *prev;
113 163
114 prev = NULL; 164 prev = NULL;
115 165
116 list_for_each_entry_rcu(p, &br->port_list, list) { 166 list_for_each_entry_rcu(p, &br->port_list, list) {
117 if (should_deliver(p, skb)) { 167 prev = maybe_deliver(prev, p, skb, __packet_hook);
118 if (prev != NULL) { 168 if (IS_ERR(prev))
119 struct sk_buff *skb2; 169 goto out;
120
121 if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
122 br->dev->stats.tx_dropped++;
123 kfree_skb(skb);
124 return;
125 }
126
127 __packet_hook(prev, skb2);
128 }
129
130 prev = p;
131 }
132 } 170 }
133 171
134 if (prev != NULL) { 172 if (!prev)
173 goto out;
174
175 if (skb0)
176 deliver_clone(prev, skb, __packet_hook);
177 else
135 __packet_hook(prev, skb); 178 __packet_hook(prev, skb);
136 return; 179 return;
137 }
138 180
139 kfree_skb(skb); 181out:
182 if (!skb0)
183 kfree_skb(skb);
140} 184}
141 185
142 186
143/* called with rcu_read_lock */ 187/* called with rcu_read_lock */
144void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb) 188void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb)
145{ 189{
146 br_flood(br, skb, __br_deliver); 190 br_flood(br, skb, NULL, __br_deliver);
147} 191}
148 192
149/* called under bridge lock */ 193/* called under bridge lock */
150void br_flood_forward(struct net_bridge *br, struct sk_buff *skb) 194void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
195 struct sk_buff *skb2)
196{
197 br_flood(br, skb, skb2, __br_forward);
198}
199
200#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
201/* called with rcu_read_lock */
202static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
203 struct sk_buff *skb, struct sk_buff *skb0,
204 void (*__packet_hook)(
205 const struct net_bridge_port *p,
206 struct sk_buff *skb))
207{
208 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
209 struct net_bridge *br = netdev_priv(dev);
210 struct net_bridge_port *port;
211 struct net_bridge_port *lport, *rport;
212 struct net_bridge_port *prev;
213 struct net_bridge_port_group *p;
214 struct hlist_node *rp;
215
216 prev = NULL;
217
218 rp = br->router_list.first;
219 p = mdst ? mdst->ports : NULL;
220 while (p || rp) {
221 lport = p ? p->port : NULL;
222 rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
223 NULL;
224
225 port = (unsigned long)lport > (unsigned long)rport ?
226 lport : rport;
227
228 prev = maybe_deliver(prev, port, skb, __packet_hook);
229 if (IS_ERR(prev))
230 goto out;
231
232 if ((unsigned long)lport >= (unsigned long)port)
233 p = p->next;
234 if ((unsigned long)rport >= (unsigned long)port)
235 rp = rp->next;
236 }
237
238 if (!prev)
239 goto out;
240
241 if (skb0)
242 deliver_clone(prev, skb, __packet_hook);
243 else
244 __packet_hook(prev, skb);
245 return;
246
247out:
248 if (!skb0)
249 kfree_skb(skb);
250}
251
252/* called with rcu_read_lock */
253void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
254 struct sk_buff *skb)
255{
256 br_multicast_flood(mdst, skb, NULL, __br_deliver);
257}
258
259/* called with rcu_read_lock */
260void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
261 struct sk_buff *skb, struct sk_buff *skb2)
151{ 262{
152 br_flood(br, skb, __br_forward); 263 br_multicast_flood(mdst, skb, skb2, __br_forward);
153} 264}
265#endif
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index a2cbe61f6e65..b6a3872f5681 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -147,6 +147,8 @@ static void del_nbp(struct net_bridge_port *p)
147 147
148 rcu_assign_pointer(dev->br_port, NULL); 148 rcu_assign_pointer(dev->br_port, NULL);
149 149
150 br_multicast_del_port(p);
151
150 kobject_uevent(&p->kobj, KOBJ_REMOVE); 152 kobject_uevent(&p->kobj, KOBJ_REMOVE);
151 kobject_del(&p->kobj); 153 kobject_del(&p->kobj);
152 154
@@ -206,9 +208,8 @@ static struct net_device *new_bridge_dev(struct net *net, const char *name)
206 208
207 br_netfilter_rtable_init(br); 209 br_netfilter_rtable_init(br);
208 210
209 INIT_LIST_HEAD(&br->age_list);
210
211 br_stp_timer_init(br); 211 br_stp_timer_init(br);
212 br_multicast_init(br);
212 213
213 return dev; 214 return dev;
214} 215}
@@ -260,6 +261,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
260 br_init_port(p); 261 br_init_port(p);
261 p->state = BR_STATE_DISABLED; 262 p->state = BR_STATE_DISABLED;
262 br_stp_port_timer_init(p); 263 br_stp_port_timer_init(p);
264 br_multicast_add_port(p);
263 265
264 return p; 266 return p;
265} 267}
@@ -467,7 +469,7 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
467 return 0; 469 return 0;
468} 470}
469 471
470void br_net_exit(struct net *net) 472void __net_exit br_net_exit(struct net *net)
471{ 473{
472 struct net_device *dev; 474 struct net_device *dev;
473 LIST_HEAD(list); 475 LIST_HEAD(list);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 5ee1a3682bf2..d74d570fc848 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -20,9 +20,9 @@
20/* Bridge group multicast address 802.1d (pg 51). */ 20/* Bridge group multicast address 802.1d (pg 51). */
21const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; 21const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
22 22
23static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb) 23static int br_pass_frame_up(struct sk_buff *skb)
24{ 24{
25 struct net_device *indev, *brdev = br->dev; 25 struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
26 26
27 brdev->stats.rx_packets++; 27 brdev->stats.rx_packets++;
28 brdev->stats.rx_bytes += skb->len; 28 brdev->stats.rx_bytes += skb->len;
@@ -30,8 +30,8 @@ static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb)
30 indev = skb->dev; 30 indev = skb->dev;
31 skb->dev = brdev; 31 skb->dev = brdev;
32 32
33 NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL, 33 return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
34 netif_receive_skb); 34 netif_receive_skb);
35} 35}
36 36
37/* note: already called with rcu_read_lock (preempt_disabled) */ 37/* note: already called with rcu_read_lock (preempt_disabled) */
@@ -41,6 +41,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
41 struct net_bridge_port *p = rcu_dereference(skb->dev->br_port); 41 struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
42 struct net_bridge *br; 42 struct net_bridge *br;
43 struct net_bridge_fdb_entry *dst; 43 struct net_bridge_fdb_entry *dst;
44 struct net_bridge_mdb_entry *mdst;
44 struct sk_buff *skb2; 45 struct sk_buff *skb2;
45 46
46 if (!p || p->state == BR_STATE_DISABLED) 47 if (!p || p->state == BR_STATE_DISABLED)
@@ -50,9 +51,15 @@ int br_handle_frame_finish(struct sk_buff *skb)
50 br = p->br; 51 br = p->br;
51 br_fdb_update(br, p, eth_hdr(skb)->h_source); 52 br_fdb_update(br, p, eth_hdr(skb)->h_source);
52 53
54 if (is_multicast_ether_addr(dest) &&
55 br_multicast_rcv(br, p, skb))
56 goto drop;
57
53 if (p->state == BR_STATE_LEARNING) 58 if (p->state == BR_STATE_LEARNING)
54 goto drop; 59 goto drop;
55 60
61 BR_INPUT_SKB_CB(skb)->brdev = br->dev;
62
56 /* The packet skb2 goes to the local host (NULL to skip). */ 63 /* The packet skb2 goes to the local host (NULL to skip). */
57 skb2 = NULL; 64 skb2 = NULL;
58 65
@@ -62,27 +69,35 @@ int br_handle_frame_finish(struct sk_buff *skb)
62 dst = NULL; 69 dst = NULL;
63 70
64 if (is_multicast_ether_addr(dest)) { 71 if (is_multicast_ether_addr(dest)) {
72 mdst = br_mdb_get(br, skb);
73 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
74 if ((mdst && !hlist_unhashed(&mdst->mglist)) ||
75 br_multicast_is_router(br))
76 skb2 = skb;
77 br_multicast_forward(mdst, skb, skb2);
78 skb = NULL;
79 if (!skb2)
80 goto out;
81 } else
82 skb2 = skb;
83
65 br->dev->stats.multicast++; 84 br->dev->stats.multicast++;
66 skb2 = skb;
67 } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) { 85 } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) {
68 skb2 = skb; 86 skb2 = skb;
69 /* Do not forward the packet since it's local. */ 87 /* Do not forward the packet since it's local. */
70 skb = NULL; 88 skb = NULL;
71 } 89 }
72 90
73 if (skb2 == skb)
74 skb2 = skb_clone(skb, GFP_ATOMIC);
75
76 if (skb2)
77 br_pass_frame_up(br, skb2);
78
79 if (skb) { 91 if (skb) {
80 if (dst) 92 if (dst)
81 br_forward(dst->dst, skb); 93 br_forward(dst->dst, skb, skb2);
82 else 94 else
83 br_flood_forward(br, skb); 95 br_flood_forward(br, skb, skb2);
84 } 96 }
85 97
98 if (skb2)
99 return br_pass_frame_up(skb2);
100
86out: 101out:
87 return 0; 102 return 0;
88drop: 103drop:
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
new file mode 100644
index 000000000000..6980625537ca
--- /dev/null
+++ b/net/bridge/br_multicast.c
@@ -0,0 +1,1309 @@
1/*
2 * Bridge multicast support.
3 *
4 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12
13#include <linux/err.h>
14#include <linux/if_ether.h>
15#include <linux/igmp.h>
16#include <linux/jhash.h>
17#include <linux/kernel.h>
18#include <linux/log2.h>
19#include <linux/netdevice.h>
20#include <linux/netfilter_bridge.h>
21#include <linux/random.h>
22#include <linux/rculist.h>
23#include <linux/skbuff.h>
24#include <linux/slab.h>
25#include <linux/timer.h>
26#include <net/ip.h>
27
28#include "br_private.h"
29
30static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
31{
32 return jhash_1word(mdb->secret, (u32)ip) & (mdb->max - 1);
33}
34
35static struct net_bridge_mdb_entry *__br_mdb_ip_get(
36 struct net_bridge_mdb_htable *mdb, __be32 dst, int hash)
37{
38 struct net_bridge_mdb_entry *mp;
39 struct hlist_node *p;
40
41 hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
42 if (dst == mp->addr)
43 return mp;
44 }
45
46 return NULL;
47}
48
49static struct net_bridge_mdb_entry *br_mdb_ip_get(
50 struct net_bridge_mdb_htable *mdb, __be32 dst)
51{
52 if (!mdb)
53 return NULL;
54
55 return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
56}
57
58struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
59 struct sk_buff *skb)
60{
61 if (br->multicast_disabled)
62 return NULL;
63
64 switch (skb->protocol) {
65 case htons(ETH_P_IP):
66 if (BR_INPUT_SKB_CB(skb)->igmp)
67 break;
68 return br_mdb_ip_get(br->mdb, ip_hdr(skb)->daddr);
69 }
70
71 return NULL;
72}
73
74static void br_mdb_free(struct rcu_head *head)
75{
76 struct net_bridge_mdb_htable *mdb =
77 container_of(head, struct net_bridge_mdb_htable, rcu);
78 struct net_bridge_mdb_htable *old = mdb->old;
79
80 mdb->old = NULL;
81 kfree(old->mhash);
82 kfree(old);
83}
84
85static int br_mdb_copy(struct net_bridge_mdb_htable *new,
86 struct net_bridge_mdb_htable *old,
87 int elasticity)
88{
89 struct net_bridge_mdb_entry *mp;
90 struct hlist_node *p;
91 int maxlen;
92 int len;
93 int i;
94
95 for (i = 0; i < old->max; i++)
96 hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver])
97 hlist_add_head(&mp->hlist[new->ver],
98 &new->mhash[br_ip_hash(new, mp->addr)]);
99
100 if (!elasticity)
101 return 0;
102
103 maxlen = 0;
104 for (i = 0; i < new->max; i++) {
105 len = 0;
106 hlist_for_each_entry(mp, p, &new->mhash[i], hlist[new->ver])
107 len++;
108 if (len > maxlen)
109 maxlen = len;
110 }
111
112 return maxlen > elasticity ? -EINVAL : 0;
113}
114
115static void br_multicast_free_pg(struct rcu_head *head)
116{
117 struct net_bridge_port_group *p =
118 container_of(head, struct net_bridge_port_group, rcu);
119
120 kfree(p);
121}
122
123static void br_multicast_free_group(struct rcu_head *head)
124{
125 struct net_bridge_mdb_entry *mp =
126 container_of(head, struct net_bridge_mdb_entry, rcu);
127
128 kfree(mp);
129}
130
131static void br_multicast_group_expired(unsigned long data)
132{
133 struct net_bridge_mdb_entry *mp = (void *)data;
134 struct net_bridge *br = mp->br;
135 struct net_bridge_mdb_htable *mdb;
136
137 spin_lock(&br->multicast_lock);
138 if (!netif_running(br->dev) || timer_pending(&mp->timer))
139 goto out;
140
141 if (!hlist_unhashed(&mp->mglist))
142 hlist_del_init(&mp->mglist);
143
144 if (mp->ports)
145 goto out;
146
147 mdb = br->mdb;
148 hlist_del_rcu(&mp->hlist[mdb->ver]);
149 mdb->size--;
150
151 del_timer(&mp->query_timer);
152 call_rcu_bh(&mp->rcu, br_multicast_free_group);
153
154out:
155 spin_unlock(&br->multicast_lock);
156}
157
158static void br_multicast_del_pg(struct net_bridge *br,
159 struct net_bridge_port_group *pg)
160{
161 struct net_bridge_mdb_htable *mdb = br->mdb;
162 struct net_bridge_mdb_entry *mp;
163 struct net_bridge_port_group *p;
164 struct net_bridge_port_group **pp;
165
166 mp = br_mdb_ip_get(mdb, pg->addr);
167 if (WARN_ON(!mp))
168 return;
169
170 for (pp = &mp->ports; (p = *pp); pp = &p->next) {
171 if (p != pg)
172 continue;
173
174 *pp = p->next;
175 hlist_del_init(&p->mglist);
176 del_timer(&p->timer);
177 del_timer(&p->query_timer);
178 call_rcu_bh(&p->rcu, br_multicast_free_pg);
179
180 if (!mp->ports && hlist_unhashed(&mp->mglist) &&
181 netif_running(br->dev))
182 mod_timer(&mp->timer, jiffies);
183
184 return;
185 }
186
187 WARN_ON(1);
188}
189
190static void br_multicast_port_group_expired(unsigned long data)
191{
192 struct net_bridge_port_group *pg = (void *)data;
193 struct net_bridge *br = pg->port->br;
194
195 spin_lock(&br->multicast_lock);
196 if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
197 hlist_unhashed(&pg->mglist))
198 goto out;
199
200 br_multicast_del_pg(br, pg);
201
202out:
203 spin_unlock(&br->multicast_lock);
204}
205
206static int br_mdb_rehash(struct net_bridge_mdb_htable **mdbp, int max,
207 int elasticity)
208{
209 struct net_bridge_mdb_htable *old = *mdbp;
210 struct net_bridge_mdb_htable *mdb;
211 int err;
212
213 mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
214 if (!mdb)
215 return -ENOMEM;
216
217 mdb->max = max;
218 mdb->old = old;
219
220 mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
221 if (!mdb->mhash) {
222 kfree(mdb);
223 return -ENOMEM;
224 }
225
226 mdb->size = old ? old->size : 0;
227 mdb->ver = old ? old->ver ^ 1 : 0;
228
229 if (!old || elasticity)
230 get_random_bytes(&mdb->secret, sizeof(mdb->secret));
231 else
232 mdb->secret = old->secret;
233
234 if (!old)
235 goto out;
236
237 err = br_mdb_copy(mdb, old, elasticity);
238 if (err) {
239 kfree(mdb->mhash);
240 kfree(mdb);
241 return err;
242 }
243
244 call_rcu_bh(&mdb->rcu, br_mdb_free);
245
246out:
247 rcu_assign_pointer(*mdbp, mdb);
248
249 return 0;
250}
251
252static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
253 __be32 group)
254{
255 struct sk_buff *skb;
256 struct igmphdr *ih;
257 struct ethhdr *eth;
258 struct iphdr *iph;
259
260 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
261 sizeof(*ih) + 4);
262 if (!skb)
263 goto out;
264
265 skb->protocol = htons(ETH_P_IP);
266
267 skb_reset_mac_header(skb);
268 eth = eth_hdr(skb);
269
270 memcpy(eth->h_source, br->dev->dev_addr, 6);
271 eth->h_dest[0] = 1;
272 eth->h_dest[1] = 0;
273 eth->h_dest[2] = 0x5e;
274 eth->h_dest[3] = 0;
275 eth->h_dest[4] = 0;
276 eth->h_dest[5] = 1;
277 eth->h_proto = htons(ETH_P_IP);
278 skb_put(skb, sizeof(*eth));
279
280 skb_set_network_header(skb, skb->len);
281 iph = ip_hdr(skb);
282
283 iph->version = 4;
284 iph->ihl = 6;
285 iph->tos = 0xc0;
286 iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4);
287 iph->id = 0;
288 iph->frag_off = htons(IP_DF);
289 iph->ttl = 1;
290 iph->protocol = IPPROTO_IGMP;
291 iph->saddr = 0;
292 iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
293 ((u8 *)&iph[1])[0] = IPOPT_RA;
294 ((u8 *)&iph[1])[1] = 4;
295 ((u8 *)&iph[1])[2] = 0;
296 ((u8 *)&iph[1])[3] = 0;
297 ip_send_check(iph);
298 skb_put(skb, 24);
299
300 skb_set_transport_header(skb, skb->len);
301 ih = igmp_hdr(skb);
302 ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
303 ih->code = (group ? br->multicast_last_member_interval :
304 br->multicast_query_response_interval) /
305 (HZ / IGMP_TIMER_SCALE);
306 ih->group = group;
307 ih->csum = 0;
308 ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
309 skb_put(skb, sizeof(*ih));
310
311 __skb_pull(skb, sizeof(*eth));
312
313out:
314 return skb;
315}
316
317static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
318{
319 struct net_bridge *br = mp->br;
320 struct sk_buff *skb;
321
322 skb = br_multicast_alloc_query(br, mp->addr);
323 if (!skb)
324 goto timer;
325
326 netif_rx(skb);
327
328timer:
329 if (++mp->queries_sent < br->multicast_last_member_count)
330 mod_timer(&mp->query_timer,
331 jiffies + br->multicast_last_member_interval);
332}
333
334static void br_multicast_group_query_expired(unsigned long data)
335{
336 struct net_bridge_mdb_entry *mp = (void *)data;
337 struct net_bridge *br = mp->br;
338
339 spin_lock(&br->multicast_lock);
340 if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) ||
341 mp->queries_sent >= br->multicast_last_member_count)
342 goto out;
343
344 br_multicast_send_group_query(mp);
345
346out:
347 spin_unlock(&br->multicast_lock);
348}
349
350static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
351{
352 struct net_bridge_port *port = pg->port;
353 struct net_bridge *br = port->br;
354 struct sk_buff *skb;
355
356 skb = br_multicast_alloc_query(br, pg->addr);
357 if (!skb)
358 goto timer;
359
360 br_deliver(port, skb);
361
362timer:
363 if (++pg->queries_sent < br->multicast_last_member_count)
364 mod_timer(&pg->query_timer,
365 jiffies + br->multicast_last_member_interval);
366}
367
368static void br_multicast_port_group_query_expired(unsigned long data)
369{
370 struct net_bridge_port_group *pg = (void *)data;
371 struct net_bridge_port *port = pg->port;
372 struct net_bridge *br = port->br;
373
374 spin_lock(&br->multicast_lock);
375 if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
376 pg->queries_sent >= br->multicast_last_member_count)
377 goto out;
378
379 br_multicast_send_port_group_query(pg);
380
381out:
382 spin_unlock(&br->multicast_lock);
383}
384
385static struct net_bridge_mdb_entry *br_multicast_get_group(
386 struct net_bridge *br, struct net_bridge_port *port, __be32 group,
387 int hash)
388{
389 struct net_bridge_mdb_htable *mdb = br->mdb;
390 struct net_bridge_mdb_entry *mp;
391 struct hlist_node *p;
392 unsigned count = 0;
393 unsigned max;
394 int elasticity;
395 int err;
396
397 hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
398 count++;
399 if (unlikely(group == mp->addr)) {
400 return mp;
401 }
402 }
403
404 elasticity = 0;
405 max = mdb->max;
406
407 if (unlikely(count > br->hash_elasticity && count)) {
408 if (net_ratelimit())
409 printk(KERN_INFO "%s: Multicast hash table "
410 "chain limit reached: %s\n",
411 br->dev->name, port ? port->dev->name :
412 br->dev->name);
413
414 elasticity = br->hash_elasticity;
415 }
416
417 if (mdb->size >= max) {
418 max *= 2;
419 if (unlikely(max >= br->hash_max)) {
420 printk(KERN_WARNING "%s: Multicast hash table maximum "
421 "reached, disabling snooping: %s, %d\n",
422 br->dev->name, port ? port->dev->name :
423 br->dev->name,
424 max);
425 err = -E2BIG;
426disable:
427 br->multicast_disabled = 1;
428 goto err;
429 }
430 }
431
432 if (max > mdb->max || elasticity) {
433 if (mdb->old) {
434 if (net_ratelimit())
435 printk(KERN_INFO "%s: Multicast hash table "
436 "on fire: %s\n",
437 br->dev->name, port ? port->dev->name :
438 br->dev->name);
439 err = -EEXIST;
440 goto err;
441 }
442
443 err = br_mdb_rehash(&br->mdb, max, elasticity);
444 if (err) {
445 printk(KERN_WARNING "%s: Cannot rehash multicast "
446 "hash table, disabling snooping: "
447 "%s, %d, %d\n",
448 br->dev->name, port ? port->dev->name :
449 br->dev->name,
450 mdb->size, err);
451 goto disable;
452 }
453
454 err = -EAGAIN;
455 goto err;
456 }
457
458 return NULL;
459
460err:
461 mp = ERR_PTR(err);
462 return mp;
463}
464
465static struct net_bridge_mdb_entry *br_multicast_new_group(
466 struct net_bridge *br, struct net_bridge_port *port, __be32 group)
467{
468 struct net_bridge_mdb_htable *mdb = br->mdb;
469 struct net_bridge_mdb_entry *mp;
470 int hash;
471
472 if (!mdb) {
473 if (br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0))
474 return NULL;
475 goto rehash;
476 }
477
478 hash = br_ip_hash(mdb, group);
479 mp = br_multicast_get_group(br, port, group, hash);
480 switch (PTR_ERR(mp)) {
481 case 0:
482 break;
483
484 case -EAGAIN:
485rehash:
486 mdb = br->mdb;
487 hash = br_ip_hash(mdb, group);
488 break;
489
490 default:
491 goto out;
492 }
493
494 mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
495 if (unlikely(!mp))
496 goto out;
497
498 mp->br = br;
499 mp->addr = group;
500 setup_timer(&mp->timer, br_multicast_group_expired,
501 (unsigned long)mp);
502 setup_timer(&mp->query_timer, br_multicast_group_query_expired,
503 (unsigned long)mp);
504
505 hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
506 mdb->size++;
507
508out:
509 return mp;
510}
511
/*
 * Record that @group has a listener behind @port (or on the bridge
 * device itself when @port is NULL) and (re)arm its membership timer.
 *
 * Returns 0 on success (also for link-local groups, which are never
 * snooped) or -ENOMEM if a port-group entry could not be allocated.
 */
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port, __be32 group)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group **pp;
	unsigned long now = jiffies;
	int err;

	/* 224.0.0.x is always flooded; don't track it. */
	if (ipv4_is_local_multicast(group))
		return 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, port, group);
	err = PTR_ERR(mp);
	/* NOTE(review): when mp is NULL (OOM) PTR_ERR(NULL) is 0, so the
	 * failure is reported as success - presumably the deliberate
	 * "treat OOM as packet loss" policy; confirm before relying on
	 * the error code here.
	 */
	if (unlikely(IS_ERR(mp) || !mp))
		goto err;

	if (!port) {
		/* Join on the bridge device itself, not a member port. */
		hlist_add_head(&mp->mglist, &br->mglist);
		mod_timer(&mp->timer, now + br->multicast_membership_interval);
		goto out;
	}

	/* Port list is kept sorted by descending port pointer value;
	 * stop early once we pass where this port would sit.
	 */
	for (pp = &mp->ports; (p = *pp); pp = &p->next) {
		if (p->port == port)
			goto found;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	err = -ENOMEM;
	if (unlikely(!p))
		goto err;

	p->addr = group;
	p->port = port;
	p->next = *pp;
	hlist_add_head(&p->mglist, &port->mglist);
	setup_timer(&p->timer, br_multicast_port_group_expired,
		    (unsigned long)p);
	setup_timer(&p->query_timer, br_multicast_port_group_query_expired,
		    (unsigned long)p);

	/* Publish only after the entry is fully initialised. */
	rcu_assign_pointer(*pp, p);

found:
	mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}
572
573static void br_multicast_router_expired(unsigned long data)
574{
575 struct net_bridge_port *port = (void *)data;
576 struct net_bridge *br = port->br;
577
578 spin_lock(&br->multicast_lock);
579 if (port->multicast_router != 1 ||
580 timer_pending(&port->multicast_router_timer) ||
581 hlist_unhashed(&port->rlist))
582 goto out;
583
584 hlist_del_init_rcu(&port->rlist);
585
586out:
587 spin_unlock(&br->multicast_lock);
588}
589
/*
 * Callback for the bridge's own router/querier timers.  Deliberately
 * empty: consumers only test timer_pending() on these timers (see
 * br_multicast_is_router()), so expiry itself is the state change and
 * no action is needed here.
 */
static void br_multicast_local_router_expired(unsigned long data)
{
}
593
/*
 * Emit a general IGMP query and re-arm the corresponding query timer.
 * @port selects the egress port (NULL = send via the bridge device);
 * @sent is the number of startup queries issued so far and selects
 * the startup vs. steady-state query interval.
 *
 * Nothing is sent while snooping is disabled, the device is down, or
 * another querier on the network is still considered active.
 * Callers hold br->multicast_lock.
 */
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port, u32 sent)
{
	unsigned long time;
	struct sk_buff *skb;

	if (!netif_running(br->dev) || br->multicast_disabled ||
	    timer_pending(&br->multicast_querier_timer))
		return;

	skb = br_multicast_alloc_query(br, 0);
	if (!skb)
		goto timer;	/* allocation failed: still re-arm the timer */

	if (port) {
		/* Transmit out of one specific port, through netfilter. */
		__skb_push(skb, sizeof(struct ethhdr));
		skb->dev = port->dev;
		NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
			dev_queue_xmit);
	} else
		/* Loop the query back into the bridge's own receive path. */
		netif_rx(skb);

timer:
	time = jiffies;
	time += sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(port ? &port->multicast_query_timer :
			 &br->multicast_query_timer, time);
}
624
/*
 * Per-port query timer callback: send the next general query on this
 * port, counting it against the startup-query budget.  Skipped while
 * the port cannot forward (disabled/blocking).
 */
static void br_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	/* Saturate at the configured startup count so we eventually
	 * switch to the steady-state query interval.
	 */
	if (port->multicast_startup_queries_sent <
	    br->multicast_startup_query_count)
		port->multicast_startup_queries_sent++;

	br_multicast_send_query(port->br, port,
				port->multicast_startup_queries_sent);

out:
	spin_unlock(&br->multicast_lock);
}
645
646void br_multicast_add_port(struct net_bridge_port *port)
647{
648 port->multicast_router = 1;
649
650 setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
651 (unsigned long)port);
652 setup_timer(&port->multicast_query_timer,
653 br_multicast_port_query_expired, (unsigned long)port);
654}
655
/*
 * Tear down per-port multicast state when @port leaves the bridge.
 * Only the router timer is synchronously stopped here; group
 * memberships and the query timer are cleaned up on the disable path
 * (br_multicast_disable_port()).
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	del_timer_sync(&port->multicast_router_timer);
}
660
/*
 * Restart startup querying on @port by firing its query timer
 * immediately.  The try_to_del/del_timer pair covers both the idle
 * and the pending timer cases; if the callback is running right now
 * (try_to_del_timer_sync() < 0 and nothing pending) the timer is
 * left alone - it will re-arm itself anyway.
 */
static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	port->multicast_startup_queries_sent = 0;

	if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 ||
	    del_timer(&port->multicast_query_timer))
		mod_timer(&port->multicast_query_timer, jiffies);
}
669
670void br_multicast_enable_port(struct net_bridge_port *port)
671{
672 struct net_bridge *br = port->br;
673
674 spin_lock(&br->multicast_lock);
675 if (br->multicast_disabled || !netif_running(br->dev))
676 goto out;
677
678 __br_multicast_enable_port(port);
679
680out:
681 spin_unlock(&br->multicast_lock);
682}
683
/*
 * Stop all multicast activity on @port: drop every group membership
 * it holds, remove it from the router-port list and stop its timers.
 * Called when the port is taken out of forwarding (STP disable path).
 */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *p, *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, p, n, &port->mglist, mglist)
		br_multicast_del_pg(br, pg);

	if (!hlist_unhashed(&port->rlist))
		hlist_del_init_rcu(&port->rlist);
	del_timer(&port->multicast_router_timer);
	del_timer(&port->multicast_query_timer);
	spin_unlock(&br->multicast_lock);
}
700
701static int br_multicast_igmp3_report(struct net_bridge *br,
702 struct net_bridge_port *port,
703 struct sk_buff *skb)
704{
705 struct igmpv3_report *ih;
706 struct igmpv3_grec *grec;
707 int i;
708 int len;
709 int num;
710 int type;
711 int err = 0;
712 __be32 group;
713
714 if (!pskb_may_pull(skb, sizeof(*ih)))
715 return -EINVAL;
716
717 ih = igmpv3_report_hdr(skb);
718 num = ntohs(ih->ngrec);
719 len = sizeof(*ih);
720
721 for (i = 0; i < num; i++) {
722 len += sizeof(*grec);
723 if (!pskb_may_pull(skb, len))
724 return -EINVAL;
725
726 grec = (void *)(skb->data + len);
727 group = grec->grec_mca;
728 type = grec->grec_type;
729
730 len += grec->grec_nsrcs * 4;
731 if (!pskb_may_pull(skb, len))
732 return -EINVAL;
733
734 /* We treat this as an IGMPv2 report for now. */
735 switch (type) {
736 case IGMPV3_MODE_IS_INCLUDE:
737 case IGMPV3_MODE_IS_EXCLUDE:
738 case IGMPV3_CHANGE_TO_INCLUDE:
739 case IGMPV3_CHANGE_TO_EXCLUDE:
740 case IGMPV3_ALLOW_NEW_SOURCES:
741 case IGMPV3_BLOCK_OLD_SOURCES:
742 break;
743
744 default:
745 continue;
746 }
747
748 err = br_multicast_add_group(br, port, group);
749 if (err)
750 break;
751 }
752
753 return err;
754}
755
/*
 * Insert @port into br->router_list, which is kept sorted by
 * descending port pointer value.  The hlist node is linked by hand
 * rather than through hlist_add_* so that the new entry can be
 * published to RCU readers (rcu_assign_pointer) only after its own
 * forward/backward links are valid.
 *
 * Called with br->multicast_lock held.
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct hlist_node *p;
	struct hlist_node **h;

	/* Walk until *h is NULL or points at a port <= @port. */
	for (h = &br->router_list.first;
	     (p = *h) &&
	     (unsigned long)container_of(p, struct net_bridge_port, rlist) >
	     (unsigned long)port;
	     h = &p->next)
		;

	port->rlist.pprev = h;
	port->rlist.next = p;
	rcu_assign_pointer(*h, &port->rlist);
	if (p)
		p->pprev = &port->rlist.next;
}
775
/*
 * Note that a multicast router was observed behind @port (or on the
 * bridge itself when @port is NULL) and refresh the corresponding
 * router timer.  Ports with a fixed router setting (0 or 2) are left
 * alone; only automatic mode (== 1) is timer-driven.
 */
static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		/* Router seen on the bridge device itself. */
		if (br->multicast_router == 1)
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		return;
	}

	if (port->multicast_router != 1)
		return;

	/* Already on the router list: just refresh the timer. */
	if (!hlist_unhashed(&port->rlist))
		goto timer;

	br_multicast_add_router(br, port);

timer:
	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}
800
801static void br_multicast_query_received(struct net_bridge *br,
802 struct net_bridge_port *port,
803 __be32 saddr)
804{
805 if (saddr)
806 mod_timer(&br->multicast_querier_timer,
807 jiffies + br->multicast_querier_interval);
808 else if (timer_pending(&br->multicast_querier_timer))
809 return;
810
811 br_multicast_mark_router(br, port);
812}
813
814static int br_multicast_query(struct net_bridge *br,
815 struct net_bridge_port *port,
816 struct sk_buff *skb)
817{
818 struct iphdr *iph = ip_hdr(skb);
819 struct igmphdr *ih = igmp_hdr(skb);
820 struct net_bridge_mdb_entry *mp;
821 struct igmpv3_query *ih3;
822 struct net_bridge_port_group *p;
823 struct net_bridge_port_group **pp;
824 unsigned long max_delay;
825 unsigned long now = jiffies;
826 __be32 group;
827 int err = 0;
828
829 spin_lock(&br->multicast_lock);
830 if (!netif_running(br->dev) ||
831 (port && port->state == BR_STATE_DISABLED))
832 goto out;
833
834 br_multicast_query_received(br, port, iph->saddr);
835
836 group = ih->group;
837
838 if (skb->len == sizeof(*ih)) {
839 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
840
841 if (!max_delay) {
842 max_delay = 10 * HZ;
843 group = 0;
844 }
845 } else {
846 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) {
847 err = -EINVAL;
848 goto out;
849 }
850
851 ih3 = igmpv3_query_hdr(skb);
852 if (ih3->nsrcs)
853 goto out;
854
855 max_delay = ih3->code ?
856 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
857 }
858
859 if (!group)
860 goto out;
861
862 mp = br_mdb_ip_get(br->mdb, group);
863 if (!mp)
864 goto out;
865
866 max_delay *= br->multicast_last_member_count;
867
868 if (!hlist_unhashed(&mp->mglist) &&
869 (timer_pending(&mp->timer) ?
870 time_after(mp->timer.expires, now + max_delay) :
871 try_to_del_timer_sync(&mp->timer) >= 0))
872 mod_timer(&mp->timer, now + max_delay);
873
874 for (pp = &mp->ports; (p = *pp); pp = &p->next) {
875 if (timer_pending(&p->timer) ?
876 time_after(p->timer.expires, now + max_delay) :
877 try_to_del_timer_sync(&p->timer) >= 0)
878 mod_timer(&mp->timer, now + max_delay);
879 }
880
881out:
882 spin_unlock(&br->multicast_lock);
883 return err;
884}
885
/*
 * Handle an IGMP leave for @group received via @port (NULL = the
 * bridge itself).  Implements fast-leave aging: the matching entry's
 * expiry is shortened to last_member_count * last_member_interval and
 * its group-specific query timer is fired immediately, so membership
 * is dropped quickly unless a report refreshes it.
 *
 * Skipped entirely when another querier is active on the network
 * (that querier is responsible for the group-specific queries) or for
 * link-local groups, which are never tracked.
 */
static void br_multicast_leave_group(struct net_bridge *br,
				     struct net_bridge_port *port,
				     __be32 group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	if (ipv4_is_local_multicast(group))
		return;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED) ||
	    timer_pending(&br->multicast_querier_timer))
		goto out;

	mdb = br->mdb;
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		/* Leave from the bridge device itself: shorten (never
		 * extend) the entry's expiry and start querying now.
		 */
		if (!hlist_unhashed(&mp->mglist) &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);

			mp->queries_sent = 0;
			mod_timer(&mp->query_timer, now);
		}

		goto out;
	}

	/* Same treatment for the single matching port group, if any. */
	for (p = mp->ports; p; p = p->next) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);

			p->queries_sent = 0;
			mod_timer(&p->query_timer, now);
		}

		break;
	}

out:
	spin_unlock(&br->multicast_lock);
}
948
949static int br_multicast_ipv4_rcv(struct net_bridge *br,
950 struct net_bridge_port *port,
951 struct sk_buff *skb)
952{
953 struct sk_buff *skb2 = skb;
954 struct iphdr *iph;
955 struct igmphdr *ih;
956 unsigned len;
957 unsigned offset;
958 int err;
959
960 BR_INPUT_SKB_CB(skb)->igmp = 0;
961 BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
962
963 /* We treat OOM as packet loss for now. */
964 if (!pskb_may_pull(skb, sizeof(*iph)))
965 return -EINVAL;
966
967 iph = ip_hdr(skb);
968
969 if (iph->ihl < 5 || iph->version != 4)
970 return -EINVAL;
971
972 if (!pskb_may_pull(skb, ip_hdrlen(skb)))
973 return -EINVAL;
974
975 iph = ip_hdr(skb);
976
977 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
978 return -EINVAL;
979
980 if (iph->protocol != IPPROTO_IGMP)
981 return 0;
982
983 len = ntohs(iph->tot_len);
984 if (skb->len < len || len < ip_hdrlen(skb))
985 return -EINVAL;
986
987 if (skb->len > len) {
988 skb2 = skb_clone(skb, GFP_ATOMIC);
989 if (!skb2)
990 return -ENOMEM;
991
992 err = pskb_trim_rcsum(skb2, len);
993 if (err)
994 goto err_out;
995 }
996
997 len -= ip_hdrlen(skb2);
998 offset = skb_network_offset(skb2) + ip_hdrlen(skb2);
999 __skb_pull(skb2, offset);
1000 skb_reset_transport_header(skb2);
1001
1002 err = -EINVAL;
1003 if (!pskb_may_pull(skb2, sizeof(*ih)))
1004 goto out;
1005
1006 iph = ip_hdr(skb2);
1007
1008 switch (skb2->ip_summed) {
1009 case CHECKSUM_COMPLETE:
1010 if (!csum_fold(skb2->csum))
1011 break;
1012 /* fall through */
1013 case CHECKSUM_NONE:
1014 skb2->csum = 0;
1015 if (skb_checksum_complete(skb2))
1016 goto out;
1017 }
1018
1019 err = 0;
1020
1021 BR_INPUT_SKB_CB(skb)->igmp = 1;
1022 ih = igmp_hdr(skb2);
1023
1024 switch (ih->type) {
1025 case IGMP_HOST_MEMBERSHIP_REPORT:
1026 case IGMPV2_HOST_MEMBERSHIP_REPORT:
1027 BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
1028 err = br_multicast_add_group(br, port, ih->group);
1029 break;
1030 case IGMPV3_HOST_MEMBERSHIP_REPORT:
1031 err = br_multicast_igmp3_report(br, port, skb2);
1032 break;
1033 case IGMP_HOST_MEMBERSHIP_QUERY:
1034 err = br_multicast_query(br, port, skb2);
1035 break;
1036 case IGMP_HOST_LEAVE_MESSAGE:
1037 br_multicast_leave_group(br, port, ih->group);
1038 break;
1039 }
1040
1041out:
1042 __skb_push(skb2, offset);
1043err_out:
1044 if (skb2 != skb)
1045 kfree_skb(skb2);
1046 return err;
1047}
1048
1049int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1050 struct sk_buff *skb)
1051{
1052 if (br->multicast_disabled)
1053 return 0;
1054
1055 switch (skb->protocol) {
1056 case htons(ETH_P_IP):
1057 return br_multicast_ipv4_rcv(br, port, skb);
1058 }
1059
1060 return 0;
1061}
1062
/*
 * Bridge-level query timer callback: send the next general query from
 * the bridge device, counting it against the startup-query budget
 * (which saturates so the steady-state interval eventually applies).
 */
static void br_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	spin_lock(&br->multicast_lock);
	if (br->multicast_startup_queries_sent <
	    br->multicast_startup_query_count)
		br->multicast_startup_queries_sent++;

	br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);

	spin_unlock(&br->multicast_lock);
}
1076
/*
 * Set the snooping defaults and initialise lock and timers.  The
 * intervals follow the standard IGMP querier defaults: 125s query
 * interval, 10s max response time, robustness of 2, and a membership
 * interval of 2 * 125s + 10s = 260s.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_elasticity = 4;	/* max chain length before rehash */
	br->hash_max = 512;

	br->multicast_router = 1;	/* learn routers from queries */
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	spin_lock_init(&br->multicast_lock);
	/* These two are pure timeout flags; their callback is a no-op
	 * and only timer_pending() is ever consulted.
	 */
	setup_timer(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	setup_timer(&br->multicast_querier_timer,
		    br_multicast_local_router_expired, 0);
	setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
		    (unsigned long)br);
}
1101
1102void br_multicast_open(struct net_bridge *br)
1103{
1104 br->multicast_startup_queries_sent = 0;
1105
1106 if (br->multicast_disabled)
1107 return;
1108
1109 mod_timer(&br->multicast_query_timer, jiffies);
1110}
1111
/*
 * Shut down snooping when the bridge goes down: stop all bridge-level
 * timers, then tear down the whole MDB.  Entries and the table itself
 * are freed via RCU so concurrent lockless readers stay safe.
 */
void br_multicast_stop(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *p, *n;
	u32 ver;
	int i;

	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->multicast_querier_timer);
	del_timer_sync(&br->multicast_query_timer);

	spin_lock_bh(&br->multicast_lock);
	mdb = br->mdb;
	if (!mdb)
		goto out;

	br->mdb = NULL;

	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			del_timer(&mp->query_timer);
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	/* A rehash is still in flight: wait (lock dropped) for its RCU
	 * callback to release the old table before freeing this one.
	 */
	if (mdb->old) {
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	/* Self-reference marks the table as dying for br_mdb_free. */
	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);
}
1154
1155int br_multicast_set_router(struct net_bridge *br, unsigned long val)
1156{
1157 int err = -ENOENT;
1158
1159 spin_lock_bh(&br->multicast_lock);
1160 if (!netif_running(br->dev))
1161 goto unlock;
1162
1163 switch (val) {
1164 case 0:
1165 case 2:
1166 del_timer(&br->multicast_router_timer);
1167 /* fall through */
1168 case 1:
1169 br->multicast_router = val;
1170 err = 0;
1171 break;
1172
1173 default:
1174 err = -EINVAL;
1175 break;
1176 }
1177
1178unlock:
1179 spin_unlock_bh(&br->multicast_lock);
1180
1181 return err;
1182}
1183
/*
 * Sysfs backend: set a member port's multicast_router mode.
 * 0 = never a router (removed from router list, timer stopped),
 * 1 = automatic (timer-driven), 2 = always a router (added to the
 * router list permanently, timer stopped).
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOENT when the
 * bridge is down or the port disabled.
 */
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	int err = -ENOENT;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED)
		goto unlock;

	switch (val) {
	case 0:
	case 1:
	case 2:
		p->multicast_router = val;
		err = 0;

		/* Modes 0 and 1 must not keep a permanent list entry. */
		if (val < 2 && !hlist_unhashed(&p->rlist))
			hlist_del_init_rcu(&p->rlist);

		if (val == 1)
			break;

		del_timer(&p->multicast_router_timer);

		if (val == 0)
			break;

		/* val == 2: pin the port on the router list. */
		br_multicast_add_router(br, p);
		break;

	default:
		err = -EINVAL;
		break;
	}

unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}
1224
/*
 * Sysfs backend: enable (@val != 0) or disable snooping.  Enabling
 * rehashes the existing MDB (to repopulate the active hash version)
 * and restarts querying on the bridge and every forwarding port; on
 * rehash failure the disable flag is rolled back.
 *
 * Returns 0 on success, -EEXIST if a previous rehash is still
 * pending, -ENOENT when the bridge is down, or a br_mdb_rehash error.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *port;
	int err = -ENOENT;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev))
		goto unlock;

	err = 0;
	if (br->multicast_disabled == !val)
		goto unlock;	/* no change requested */

	br->multicast_disabled = !val;
	if (br->multicast_disabled)
		goto unlock;	/* disabling needs no further work */

	if (br->mdb) {
		if (br->mdb->old) {
			err = -EEXIST;
rollback:
			br->multicast_disabled = !!val;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, br->mdb->max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		__br_multicast_enable_port(port);
	}

unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}
1270
/*
 * Sysfs backend: set the MDB hash table size limit.  @val must be a
 * power of two and no smaller than the current entry count.  If a
 * table already exists it is rehashed to the new maximum; on failure
 * the old limit is restored.
 *
 * Returns 0 on success, -EINVAL for a bad value, -EEXIST if a
 * previous rehash is still pending, -ENOENT when the bridge is down,
 * or a br_mdb_rehash error.
 */
int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
	int err = -ENOENT;
	u32 old;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev))
		goto unlock;

	err = -EINVAL;
	if (!is_power_of_2(val))
		goto unlock;
	if (br->mdb && val < br->mdb->size)
		goto unlock;

	err = 0;

	old = br->hash_max;
	br->hash_max = val;

	if (br->mdb) {
		if (br->mdb->old) {
			err = -EEXIST;
rollback:
			br->hash_max = old;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, br->hash_max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2114e45682ea..846d7d1e2075 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -57,6 +57,41 @@ struct net_bridge_fdb_entry
57 unsigned char is_static; 57 unsigned char is_static;
58}; 58};
59 59
60struct net_bridge_port_group {
61 struct net_bridge_port *port;
62 struct net_bridge_port_group *next;
63 struct hlist_node mglist;
64 struct rcu_head rcu;
65 struct timer_list timer;
66 struct timer_list query_timer;
67 __be32 addr;
68 u32 queries_sent;
69};
70
71struct net_bridge_mdb_entry
72{
73 struct hlist_node hlist[2];
74 struct hlist_node mglist;
75 struct net_bridge *br;
76 struct net_bridge_port_group *ports;
77 struct rcu_head rcu;
78 struct timer_list timer;
79 struct timer_list query_timer;
80 __be32 addr;
81 u32 queries_sent;
82};
83
84struct net_bridge_mdb_htable
85{
86 struct hlist_head *mhash;
87 struct rcu_head rcu;
88 struct net_bridge_mdb_htable *old;
89 u32 size;
90 u32 max;
91 u32 secret;
92 u32 ver;
93};
94
60struct net_bridge_port 95struct net_bridge_port
61{ 96{
62 struct net_bridge *br; 97 struct net_bridge *br;
@@ -84,6 +119,15 @@ struct net_bridge_port
84 119
85 unsigned long flags; 120 unsigned long flags;
86#define BR_HAIRPIN_MODE 0x00000001 121#define BR_HAIRPIN_MODE 0x00000001
122
123#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
124 u32 multicast_startup_queries_sent;
125 unsigned char multicast_router;
126 struct timer_list multicast_router_timer;
127 struct timer_list multicast_query_timer;
128 struct hlist_head mglist;
129 struct hlist_node rlist;
130#endif
87}; 131};
88 132
89struct net_bridge 133struct net_bridge
@@ -93,7 +137,6 @@ struct net_bridge
93 struct net_device *dev; 137 struct net_device *dev;
94 spinlock_t hash_lock; 138 spinlock_t hash_lock;
95 struct hlist_head hash[BR_HASH_SIZE]; 139 struct hlist_head hash[BR_HASH_SIZE];
96 struct list_head age_list;
97 unsigned long feature_mask; 140 unsigned long feature_mask;
98#ifdef CONFIG_BRIDGE_NETFILTER 141#ifdef CONFIG_BRIDGE_NETFILTER
99 struct rtable fake_rtable; 142 struct rtable fake_rtable;
@@ -125,6 +168,35 @@ struct net_bridge
125 unsigned char topology_change; 168 unsigned char topology_change;
126 unsigned char topology_change_detected; 169 unsigned char topology_change_detected;
127 170
171#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
172 unsigned char multicast_router;
173
174 u8 multicast_disabled:1;
175
176 u32 hash_elasticity;
177 u32 hash_max;
178
179 u32 multicast_last_member_count;
180 u32 multicast_startup_queries_sent;
181 u32 multicast_startup_query_count;
182
183 unsigned long multicast_last_member_interval;
184 unsigned long multicast_membership_interval;
185 unsigned long multicast_querier_interval;
186 unsigned long multicast_query_interval;
187 unsigned long multicast_query_response_interval;
188 unsigned long multicast_startup_query_interval;
189
190 spinlock_t multicast_lock;
191 struct net_bridge_mdb_htable *mdb;
192 struct hlist_head router_list;
193 struct hlist_head mglist;
194
195 struct timer_list multicast_router_timer;
196 struct timer_list multicast_querier_timer;
197 struct timer_list multicast_query_timer;
198#endif
199
128 struct timer_list hello_timer; 200 struct timer_list hello_timer;
129 struct timer_list tcn_timer; 201 struct timer_list tcn_timer;
130 struct timer_list topology_change_timer; 202 struct timer_list topology_change_timer;
@@ -132,6 +204,22 @@ struct net_bridge
132 struct kobject *ifobj; 204 struct kobject *ifobj;
133}; 205};
134 206
207struct br_input_skb_cb {
208 struct net_device *brdev;
209#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
210 int igmp;
211 int mrouters_only;
212#endif
213};
214
215#define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb)
216
217#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
218# define BR_INPUT_SKB_CB_MROUTERS_ONLY(__skb) (BR_INPUT_SKB_CB(__skb)->mrouters_only)
219#else
220# define BR_INPUT_SKB_CB_MROUTERS_ONLY(__skb) (0)
221#endif
222
135extern struct notifier_block br_device_notifier; 223extern struct notifier_block br_device_notifier;
136extern const u8 br_group_address[ETH_ALEN]; 224extern const u8 br_group_address[ETH_ALEN];
137 225
@@ -172,10 +260,11 @@ extern void br_deliver(const struct net_bridge_port *to,
172 struct sk_buff *skb); 260 struct sk_buff *skb);
173extern int br_dev_queue_push_xmit(struct sk_buff *skb); 261extern int br_dev_queue_push_xmit(struct sk_buff *skb);
174extern void br_forward(const struct net_bridge_port *to, 262extern void br_forward(const struct net_bridge_port *to,
175 struct sk_buff *skb); 263 struct sk_buff *skb, struct sk_buff *skb0);
176extern int br_forward_finish(struct sk_buff *skb); 264extern int br_forward_finish(struct sk_buff *skb);
177extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb); 265extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb);
178extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb); 266extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
267 struct sk_buff *skb2);
179 268
180/* br_if.c */ 269/* br_if.c */
181extern void br_port_carrier_check(struct net_bridge_port *p); 270extern void br_port_carrier_check(struct net_bridge_port *p);
@@ -198,6 +287,94 @@ extern struct sk_buff *br_handle_frame(struct net_bridge_port *p,
198extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 287extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
199extern int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *arg); 288extern int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *arg);
200 289
290/* br_multicast.c */
291#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
292extern int br_multicast_rcv(struct net_bridge *br,
293 struct net_bridge_port *port,
294 struct sk_buff *skb);
295extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
296 struct sk_buff *skb);
297extern void br_multicast_add_port(struct net_bridge_port *port);
298extern void br_multicast_del_port(struct net_bridge_port *port);
299extern void br_multicast_enable_port(struct net_bridge_port *port);
300extern void br_multicast_disable_port(struct net_bridge_port *port);
301extern void br_multicast_init(struct net_bridge *br);
302extern void br_multicast_open(struct net_bridge *br);
303extern void br_multicast_stop(struct net_bridge *br);
304extern void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
305 struct sk_buff *skb);
306extern void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
307 struct sk_buff *skb, struct sk_buff *skb2);
308extern int br_multicast_set_router(struct net_bridge *br, unsigned long val);
309extern int br_multicast_set_port_router(struct net_bridge_port *p,
310 unsigned long val);
311extern int br_multicast_toggle(struct net_bridge *br, unsigned long val);
312extern int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
313
314static inline bool br_multicast_is_router(struct net_bridge *br)
315{
316 return br->multicast_router == 2 ||
317 (br->multicast_router == 1 &&
318 timer_pending(&br->multicast_router_timer));
319}
320#else
321static inline int br_multicast_rcv(struct net_bridge *br,
322 struct net_bridge_port *port,
323 struct sk_buff *skb)
324{
325 return 0;
326}
327
328static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
329 struct sk_buff *skb)
330{
331 return NULL;
332}
333
334static inline void br_multicast_add_port(struct net_bridge_port *port)
335{
336}
337
338static inline void br_multicast_del_port(struct net_bridge_port *port)
339{
340}
341
342static inline void br_multicast_enable_port(struct net_bridge_port *port)
343{
344}
345
346static inline void br_multicast_disable_port(struct net_bridge_port *port)
347{
348}
349
350static inline void br_multicast_init(struct net_bridge *br)
351{
352}
353
354static inline void br_multicast_open(struct net_bridge *br)
355{
356}
357
358static inline void br_multicast_stop(struct net_bridge *br)
359{
360}
361
362static inline void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
363 struct sk_buff *skb)
364{
365}
366
367static inline void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
368 struct sk_buff *skb,
369 struct sk_buff *skb2)
370{
371}
372static inline bool br_multicast_is_router(struct net_bridge *br)
373{
374 return 0;
375}
376#endif
377
201/* br_netfilter.c */ 378/* br_netfilter.c */
202#ifdef CONFIG_BRIDGE_NETFILTER 379#ifdef CONFIG_BRIDGE_NETFILTER
203extern int br_netfilter_init(void); 380extern int br_netfilter_init(void);
@@ -254,7 +431,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
254 431
255#ifdef CONFIG_SYSFS 432#ifdef CONFIG_SYSFS
256/* br_sysfs_if.c */ 433/* br_sysfs_if.c */
257extern struct sysfs_ops brport_sysfs_ops; 434extern const struct sysfs_ops brport_sysfs_ops;
258extern int br_sysfs_addif(struct net_bridge_port *p); 435extern int br_sysfs_addif(struct net_bridge_port *p);
259 436
260/* br_sysfs_br.c */ 437/* br_sysfs_br.c */
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index fd3f8d6c0998..edcf14b560f6 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -386,6 +386,8 @@ static void br_make_forwarding(struct net_bridge_port *p)
386 else 386 else
387 p->state = BR_STATE_LEARNING; 387 p->state = BR_STATE_LEARNING;
388 388
389 br_multicast_enable_port(p);
390
389 br_log_state(p); 391 br_log_state(p);
390 392
391 if (br->forward_delay != 0) 393 if (br->forward_delay != 0)
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 9a52ac5b4525..d527119e9f54 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -108,6 +108,7 @@ void br_stp_disable_port(struct net_bridge_port *p)
108 del_timer(&p->hold_timer); 108 del_timer(&p->hold_timer);
109 109
110 br_fdb_delete_by_port(br, p, 0); 110 br_fdb_delete_by_port(br, p, 0);
111 br_multicast_disable_port(p);
111 112
112 br_configuration_update(br); 113 br_configuration_update(br);
113 114
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index bee4f300d0c8..dd321e39e621 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -345,6 +345,273 @@ static ssize_t store_flush(struct device *d,
345} 345}
346static DEVICE_ATTR(flush, S_IWUSR, NULL, store_flush); 346static DEVICE_ATTR(flush, S_IWUSR, NULL, store_flush);
347 347
348#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
349static ssize_t show_multicast_router(struct device *d,
350 struct device_attribute *attr, char *buf)
351{
352 struct net_bridge *br = to_bridge(d);
353 return sprintf(buf, "%d\n", br->multicast_router);
354}
355
356static ssize_t store_multicast_router(struct device *d,
357 struct device_attribute *attr,
358 const char *buf, size_t len)
359{
360 return store_bridge_parm(d, buf, len, br_multicast_set_router);
361}
362static DEVICE_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router,
363 store_multicast_router);
364
365static ssize_t show_multicast_snooping(struct device *d,
366 struct device_attribute *attr,
367 char *buf)
368{
369 struct net_bridge *br = to_bridge(d);
370 return sprintf(buf, "%d\n", !br->multicast_disabled);
371}
372
373static ssize_t store_multicast_snooping(struct device *d,
374 struct device_attribute *attr,
375 const char *buf, size_t len)
376{
377 return store_bridge_parm(d, buf, len, br_multicast_toggle);
378}
379static DEVICE_ATTR(multicast_snooping, S_IRUGO | S_IWUSR,
380 show_multicast_snooping, store_multicast_snooping);
381
382static ssize_t show_hash_elasticity(struct device *d,
383 struct device_attribute *attr, char *buf)
384{
385 struct net_bridge *br = to_bridge(d);
386 return sprintf(buf, "%u\n", br->hash_elasticity);
387}
388
389static int set_elasticity(struct net_bridge *br, unsigned long val)
390{
391 br->hash_elasticity = val;
392 return 0;
393}
394
395static ssize_t store_hash_elasticity(struct device *d,
396 struct device_attribute *attr,
397 const char *buf, size_t len)
398{
399 return store_bridge_parm(d, buf, len, set_elasticity);
400}
401static DEVICE_ATTR(hash_elasticity, S_IRUGO | S_IWUSR, show_hash_elasticity,
402 store_hash_elasticity);
403
404static ssize_t show_hash_max(struct device *d, struct device_attribute *attr,
405 char *buf)
406{
407 struct net_bridge *br = to_bridge(d);
408 return sprintf(buf, "%u\n", br->hash_max);
409}
410
411static ssize_t store_hash_max(struct device *d, struct device_attribute *attr,
412 const char *buf, size_t len)
413{
414 return store_bridge_parm(d, buf, len, br_multicast_set_hash_max);
415}
416static DEVICE_ATTR(hash_max, S_IRUGO | S_IWUSR, show_hash_max,
417 store_hash_max);
418
419static ssize_t show_multicast_last_member_count(struct device *d,
420 struct device_attribute *attr,
421 char *buf)
422{
423 struct net_bridge *br = to_bridge(d);
424 return sprintf(buf, "%u\n", br->multicast_last_member_count);
425}
426
427static int set_last_member_count(struct net_bridge *br, unsigned long val)
428{
429 br->multicast_last_member_count = val;
430 return 0;
431}
432
433static ssize_t store_multicast_last_member_count(struct device *d,
434 struct device_attribute *attr,
435 const char *buf, size_t len)
436{
437 return store_bridge_parm(d, buf, len, set_last_member_count);
438}
439static DEVICE_ATTR(multicast_last_member_count, S_IRUGO | S_IWUSR,
440 show_multicast_last_member_count,
441 store_multicast_last_member_count);
442
443static ssize_t show_multicast_startup_query_count(
444 struct device *d, struct device_attribute *attr, char *buf)
445{
446 struct net_bridge *br = to_bridge(d);
447 return sprintf(buf, "%u\n", br->multicast_startup_query_count);
448}
449
450static int set_startup_query_count(struct net_bridge *br, unsigned long val)
451{
452 br->multicast_startup_query_count = val;
453 return 0;
454}
455
456static ssize_t store_multicast_startup_query_count(
457 struct device *d, struct device_attribute *attr, const char *buf,
458 size_t len)
459{
460 return store_bridge_parm(d, buf, len, set_startup_query_count);
461}
462static DEVICE_ATTR(multicast_startup_query_count, S_IRUGO | S_IWUSR,
463 show_multicast_startup_query_count,
464 store_multicast_startup_query_count);
465
466static ssize_t show_multicast_last_member_interval(
467 struct device *d, struct device_attribute *attr, char *buf)
468{
469 struct net_bridge *br = to_bridge(d);
470 return sprintf(buf, "%lu\n",
471 jiffies_to_clock_t(br->multicast_last_member_interval));
472}
473
474static int set_last_member_interval(struct net_bridge *br, unsigned long val)
475{
476 br->multicast_last_member_interval = clock_t_to_jiffies(val);
477 return 0;
478}
479
480static ssize_t store_multicast_last_member_interval(
481 struct device *d, struct device_attribute *attr, const char *buf,
482 size_t len)
483{
484 return store_bridge_parm(d, buf, len, set_last_member_interval);
485}
486static DEVICE_ATTR(multicast_last_member_interval, S_IRUGO | S_IWUSR,
487 show_multicast_last_member_interval,
488 store_multicast_last_member_interval);
489
490static ssize_t show_multicast_membership_interval(
491 struct device *d, struct device_attribute *attr, char *buf)
492{
493 struct net_bridge *br = to_bridge(d);
494 return sprintf(buf, "%lu\n",
495 jiffies_to_clock_t(br->multicast_membership_interval));
496}
497
498static int set_membership_interval(struct net_bridge *br, unsigned long val)
499{
500 br->multicast_membership_interval = clock_t_to_jiffies(val);
501 return 0;
502}
503
504static ssize_t store_multicast_membership_interval(
505 struct device *d, struct device_attribute *attr, const char *buf,
506 size_t len)
507{
508 return store_bridge_parm(d, buf, len, set_membership_interval);
509}
510static DEVICE_ATTR(multicast_membership_interval, S_IRUGO | S_IWUSR,
511 show_multicast_membership_interval,
512 store_multicast_membership_interval);
513
514static ssize_t show_multicast_querier_interval(struct device *d,
515 struct device_attribute *attr,
516 char *buf)
517{
518 struct net_bridge *br = to_bridge(d);
519 return sprintf(buf, "%lu\n",
520 jiffies_to_clock_t(br->multicast_querier_interval));
521}
522
523static int set_querier_interval(struct net_bridge *br, unsigned long val)
524{
525 br->multicast_querier_interval = clock_t_to_jiffies(val);
526 return 0;
527}
528
529static ssize_t store_multicast_querier_interval(struct device *d,
530 struct device_attribute *attr,
531 const char *buf, size_t len)
532{
533 return store_bridge_parm(d, buf, len, set_querier_interval);
534}
535static DEVICE_ATTR(multicast_querier_interval, S_IRUGO | S_IWUSR,
536 show_multicast_querier_interval,
537 store_multicast_querier_interval);
538
539static ssize_t show_multicast_query_interval(struct device *d,
540 struct device_attribute *attr,
541 char *buf)
542{
543 struct net_bridge *br = to_bridge(d);
544 return sprintf(buf, "%lu\n",
545 jiffies_to_clock_t(br->multicast_query_interval));
546}
547
548static int set_query_interval(struct net_bridge *br, unsigned long val)
549{
550 br->multicast_query_interval = clock_t_to_jiffies(val);
551 return 0;
552}
553
554static ssize_t store_multicast_query_interval(struct device *d,
555 struct device_attribute *attr,
556 const char *buf, size_t len)
557{
558 return store_bridge_parm(d, buf, len, set_query_interval);
559}
560static DEVICE_ATTR(multicast_query_interval, S_IRUGO | S_IWUSR,
561 show_multicast_query_interval,
562 store_multicast_query_interval);
563
564static ssize_t show_multicast_query_response_interval(
565 struct device *d, struct device_attribute *attr, char *buf)
566{
567 struct net_bridge *br = to_bridge(d);
568 return sprintf(
569 buf, "%lu\n",
570 jiffies_to_clock_t(br->multicast_query_response_interval));
571}
572
573static int set_query_response_interval(struct net_bridge *br, unsigned long val)
574{
575 br->multicast_query_response_interval = clock_t_to_jiffies(val);
576 return 0;
577}
578
579static ssize_t store_multicast_query_response_interval(
580 struct device *d, struct device_attribute *attr, const char *buf,
581 size_t len)
582{
583 return store_bridge_parm(d, buf, len, set_query_response_interval);
584}
585static DEVICE_ATTR(multicast_query_response_interval, S_IRUGO | S_IWUSR,
586 show_multicast_query_response_interval,
587 store_multicast_query_response_interval);
588
589static ssize_t show_multicast_startup_query_interval(
590 struct device *d, struct device_attribute *attr, char *buf)
591{
592 struct net_bridge *br = to_bridge(d);
593 return sprintf(
594 buf, "%lu\n",
595 jiffies_to_clock_t(br->multicast_startup_query_interval));
596}
597
598static int set_startup_query_interval(struct net_bridge *br, unsigned long val)
599{
600 br->multicast_startup_query_interval = clock_t_to_jiffies(val);
601 return 0;
602}
603
604static ssize_t store_multicast_startup_query_interval(
605 struct device *d, struct device_attribute *attr, const char *buf,
606 size_t len)
607{
608 return store_bridge_parm(d, buf, len, set_startup_query_interval);
609}
610static DEVICE_ATTR(multicast_startup_query_interval, S_IRUGO | S_IWUSR,
611 show_multicast_startup_query_interval,
612 store_multicast_startup_query_interval);
613#endif
614
348static struct attribute *bridge_attrs[] = { 615static struct attribute *bridge_attrs[] = {
349 &dev_attr_forward_delay.attr, 616 &dev_attr_forward_delay.attr,
350 &dev_attr_hello_time.attr, 617 &dev_attr_hello_time.attr,
@@ -364,6 +631,20 @@ static struct attribute *bridge_attrs[] = {
364 &dev_attr_gc_timer.attr, 631 &dev_attr_gc_timer.attr,
365 &dev_attr_group_addr.attr, 632 &dev_attr_group_addr.attr,
366 &dev_attr_flush.attr, 633 &dev_attr_flush.attr,
634#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
635 &dev_attr_multicast_router.attr,
636 &dev_attr_multicast_snooping.attr,
637 &dev_attr_hash_elasticity.attr,
638 &dev_attr_hash_max.attr,
639 &dev_attr_multicast_last_member_count.attr,
640 &dev_attr_multicast_startup_query_count.attr,
641 &dev_attr_multicast_last_member_interval.attr,
642 &dev_attr_multicast_membership_interval.attr,
643 &dev_attr_multicast_querier_interval.attr,
644 &dev_attr_multicast_query_interval.attr,
645 &dev_attr_multicast_query_response_interval.attr,
646 &dev_attr_multicast_startup_query_interval.attr,
647#endif
367 NULL 648 NULL
368}; 649};
369 650
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 820643a3ba9c..0b9916489d6b 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -159,6 +159,21 @@ static ssize_t store_hairpin_mode(struct net_bridge_port *p, unsigned long v)
159static BRPORT_ATTR(hairpin_mode, S_IRUGO | S_IWUSR, 159static BRPORT_ATTR(hairpin_mode, S_IRUGO | S_IWUSR,
160 show_hairpin_mode, store_hairpin_mode); 160 show_hairpin_mode, store_hairpin_mode);
161 161
162#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
163static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
164{
165 return sprintf(buf, "%d\n", p->multicast_router);
166}
167
168static ssize_t store_multicast_router(struct net_bridge_port *p,
169 unsigned long v)
170{
171 return br_multicast_set_port_router(p, v);
172}
173static BRPORT_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router,
174 store_multicast_router);
175#endif
176
162static struct brport_attribute *brport_attrs[] = { 177static struct brport_attribute *brport_attrs[] = {
163 &brport_attr_path_cost, 178 &brport_attr_path_cost,
164 &brport_attr_priority, 179 &brport_attr_priority,
@@ -176,6 +191,9 @@ static struct brport_attribute *brport_attrs[] = {
176 &brport_attr_hold_timer, 191 &brport_attr_hold_timer,
177 &brport_attr_flush, 192 &brport_attr_flush,
178 &brport_attr_hairpin_mode, 193 &brport_attr_hairpin_mode,
194#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
195 &brport_attr_multicast_router,
196#endif
179 NULL 197 NULL
180}; 198};
181 199
@@ -220,7 +238,7 @@ static ssize_t brport_store(struct kobject * kobj,
220 return ret; 238 return ret;
221} 239}
222 240
223struct sysfs_ops brport_sysfs_ops = { 241const struct sysfs_ops brport_sysfs_ops = {
224 .show = brport_show, 242 .show = brport_show,
225 .store = brport_store, 243 .store = brport_store,
226}; 244};
diff --git a/net/bridge/netfilter/ebt_802_3.c b/net/bridge/netfilter/ebt_802_3.c
index bd91dc58d49b..5d1176758ca5 100644
--- a/net/bridge/netfilter/ebt_802_3.c
+++ b/net/bridge/netfilter/ebt_802_3.c
@@ -52,7 +52,7 @@ static struct xt_match ebt_802_3_mt_reg __read_mostly = {
52 .family = NFPROTO_BRIDGE, 52 .family = NFPROTO_BRIDGE,
53 .match = ebt_802_3_mt, 53 .match = ebt_802_3_mt,
54 .checkentry = ebt_802_3_mt_check, 54 .checkentry = ebt_802_3_mt_check,
55 .matchsize = XT_ALIGN(sizeof(struct ebt_802_3_info)), 55 .matchsize = sizeof(struct ebt_802_3_info),
56 .me = THIS_MODULE, 56 .me = THIS_MODULE,
57}; 57};
58 58
diff --git a/net/bridge/netfilter/ebt_arp.c b/net/bridge/netfilter/ebt_arp.c
index b7ad60419f9a..e727697c5847 100644
--- a/net/bridge/netfilter/ebt_arp.c
+++ b/net/bridge/netfilter/ebt_arp.c
@@ -120,7 +120,7 @@ static struct xt_match ebt_arp_mt_reg __read_mostly = {
120 .family = NFPROTO_BRIDGE, 120 .family = NFPROTO_BRIDGE,
121 .match = ebt_arp_mt, 121 .match = ebt_arp_mt,
122 .checkentry = ebt_arp_mt_check, 122 .checkentry = ebt_arp_mt_check,
123 .matchsize = XT_ALIGN(sizeof(struct ebt_arp_info)), 123 .matchsize = sizeof(struct ebt_arp_info),
124 .me = THIS_MODULE, 124 .me = THIS_MODULE,
125}; 125};
126 126
diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c
index 76584cd72e57..f392e9d93f53 100644
--- a/net/bridge/netfilter/ebt_arpreply.c
+++ b/net/bridge/netfilter/ebt_arpreply.c
@@ -78,7 +78,7 @@ static struct xt_target ebt_arpreply_tg_reg __read_mostly = {
78 .hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_PRE_ROUTING), 78 .hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_PRE_ROUTING),
79 .target = ebt_arpreply_tg, 79 .target = ebt_arpreply_tg,
80 .checkentry = ebt_arpreply_tg_check, 80 .checkentry = ebt_arpreply_tg_check,
81 .targetsize = XT_ALIGN(sizeof(struct ebt_arpreply_info)), 81 .targetsize = sizeof(struct ebt_arpreply_info),
82 .me = THIS_MODULE, 82 .me = THIS_MODULE,
83}; 83};
84 84
diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
index 6b49ea9e31fb..2bb40d728a35 100644
--- a/net/bridge/netfilter/ebt_dnat.c
+++ b/net/bridge/netfilter/ebt_dnat.c
@@ -54,7 +54,7 @@ static struct xt_target ebt_dnat_tg_reg __read_mostly = {
54 (1 << NF_BR_LOCAL_OUT) | (1 << NF_BR_BROUTING), 54 (1 << NF_BR_LOCAL_OUT) | (1 << NF_BR_BROUTING),
55 .target = ebt_dnat_tg, 55 .target = ebt_dnat_tg,
56 .checkentry = ebt_dnat_tg_check, 56 .checkentry = ebt_dnat_tg_check,
57 .targetsize = XT_ALIGN(sizeof(struct ebt_nat_info)), 57 .targetsize = sizeof(struct ebt_nat_info),
58 .me = THIS_MODULE, 58 .me = THIS_MODULE,
59}; 59};
60 60
diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c
index d771bbfbcbe6..5de6df6f86b8 100644
--- a/net/bridge/netfilter/ebt_ip.c
+++ b/net/bridge/netfilter/ebt_ip.c
@@ -110,7 +110,7 @@ static struct xt_match ebt_ip_mt_reg __read_mostly = {
110 .family = NFPROTO_BRIDGE, 110 .family = NFPROTO_BRIDGE,
111 .match = ebt_ip_mt, 111 .match = ebt_ip_mt,
112 .checkentry = ebt_ip_mt_check, 112 .checkentry = ebt_ip_mt_check,
113 .matchsize = XT_ALIGN(sizeof(struct ebt_ip_info)), 113 .matchsize = sizeof(struct ebt_ip_info),
114 .me = THIS_MODULE, 114 .me = THIS_MODULE,
115}; 115};
116 116
diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c
index 784a6573876c..bbf2534ef026 100644
--- a/net/bridge/netfilter/ebt_ip6.c
+++ b/net/bridge/netfilter/ebt_ip6.c
@@ -122,7 +122,7 @@ static struct xt_match ebt_ip6_mt_reg __read_mostly = {
122 .family = NFPROTO_BRIDGE, 122 .family = NFPROTO_BRIDGE,
123 .match = ebt_ip6_mt, 123 .match = ebt_ip6_mt,
124 .checkentry = ebt_ip6_mt_check, 124 .checkentry = ebt_ip6_mt_check,
125 .matchsize = XT_ALIGN(sizeof(struct ebt_ip6_info)), 125 .matchsize = sizeof(struct ebt_ip6_info),
126 .me = THIS_MODULE, 126 .me = THIS_MODULE,
127}; 127};
128 128
diff --git a/net/bridge/netfilter/ebt_limit.c b/net/bridge/netfilter/ebt_limit.c
index f7bd9192ff0c..7a8182710eb3 100644
--- a/net/bridge/netfilter/ebt_limit.c
+++ b/net/bridge/netfilter/ebt_limit.c
@@ -84,13 +84,29 @@ static bool ebt_limit_mt_check(const struct xt_mtchk_param *par)
84 return true; 84 return true;
85} 85}
86 86
87
88#ifdef CONFIG_COMPAT
89/*
90 * no conversion function needed --
91 * only avg/burst have meaningful values in userspace.
92 */
93struct ebt_compat_limit_info {
94 compat_uint_t avg, burst;
95 compat_ulong_t prev;
96 compat_uint_t credit, credit_cap, cost;
97};
98#endif
99
87static struct xt_match ebt_limit_mt_reg __read_mostly = { 100static struct xt_match ebt_limit_mt_reg __read_mostly = {
88 .name = "limit", 101 .name = "limit",
89 .revision = 0, 102 .revision = 0,
90 .family = NFPROTO_BRIDGE, 103 .family = NFPROTO_BRIDGE,
91 .match = ebt_limit_mt, 104 .match = ebt_limit_mt,
92 .checkentry = ebt_limit_mt_check, 105 .checkentry = ebt_limit_mt_check,
93 .matchsize = XT_ALIGN(sizeof(struct ebt_limit_info)), 106 .matchsize = sizeof(struct ebt_limit_info),
107#ifdef CONFIG_COMPAT
108 .compatsize = sizeof(struct ebt_compat_limit_info),
109#endif
94 .me = THIS_MODULE, 110 .me = THIS_MODULE,
95}; 111};
96 112
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index e4ea3fdd1d41..e873924ddb5d 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -195,7 +195,7 @@ static struct xt_target ebt_log_tg_reg __read_mostly = {
195 .family = NFPROTO_BRIDGE, 195 .family = NFPROTO_BRIDGE,
196 .target = ebt_log_tg, 196 .target = ebt_log_tg,
197 .checkentry = ebt_log_tg_check, 197 .checkentry = ebt_log_tg_check,
198 .targetsize = XT_ALIGN(sizeof(struct ebt_log_info)), 198 .targetsize = sizeof(struct ebt_log_info),
199 .me = THIS_MODULE, 199 .me = THIS_MODULE,
200}; 200};
201 201
diff --git a/net/bridge/netfilter/ebt_mark.c b/net/bridge/netfilter/ebt_mark.c
index 2fee7e8e2e93..2b5ce533d6b9 100644
--- a/net/bridge/netfilter/ebt_mark.c
+++ b/net/bridge/netfilter/ebt_mark.c
@@ -52,6 +52,32 @@ static bool ebt_mark_tg_check(const struct xt_tgchk_param *par)
52 return false; 52 return false;
53 return true; 53 return true;
54} 54}
55#ifdef CONFIG_COMPAT
56struct compat_ebt_mark_t_info {
57 compat_ulong_t mark;
58 compat_uint_t target;
59};
60
61static void mark_tg_compat_from_user(void *dst, const void *src)
62{
63 const struct compat_ebt_mark_t_info *user = src;
64 struct ebt_mark_t_info *kern = dst;
65
66 kern->mark = user->mark;
67 kern->target = user->target;
68}
69
70static int mark_tg_compat_to_user(void __user *dst, const void *src)
71{
72 struct compat_ebt_mark_t_info __user *user = dst;
73 const struct ebt_mark_t_info *kern = src;
74
75 if (put_user(kern->mark, &user->mark) ||
76 put_user(kern->target, &user->target))
77 return -EFAULT;
78 return 0;
79}
80#endif
55 81
56static struct xt_target ebt_mark_tg_reg __read_mostly = { 82static struct xt_target ebt_mark_tg_reg __read_mostly = {
57 .name = "mark", 83 .name = "mark",
@@ -59,7 +85,12 @@ static struct xt_target ebt_mark_tg_reg __read_mostly = {
59 .family = NFPROTO_BRIDGE, 85 .family = NFPROTO_BRIDGE,
60 .target = ebt_mark_tg, 86 .target = ebt_mark_tg,
61 .checkentry = ebt_mark_tg_check, 87 .checkentry = ebt_mark_tg_check,
62 .targetsize = XT_ALIGN(sizeof(struct ebt_mark_t_info)), 88 .targetsize = sizeof(struct ebt_mark_t_info),
89#ifdef CONFIG_COMPAT
90 .compatsize = sizeof(struct compat_ebt_mark_t_info),
91 .compat_from_user = mark_tg_compat_from_user,
92 .compat_to_user = mark_tg_compat_to_user,
93#endif
63 .me = THIS_MODULE, 94 .me = THIS_MODULE,
64}; 95};
65 96
diff --git a/net/bridge/netfilter/ebt_mark_m.c b/net/bridge/netfilter/ebt_mark_m.c
index ea570f214b1d..8de8c396d913 100644
--- a/net/bridge/netfilter/ebt_mark_m.c
+++ b/net/bridge/netfilter/ebt_mark_m.c
@@ -35,13 +35,50 @@ static bool ebt_mark_mt_check(const struct xt_mtchk_param *par)
35 return true; 35 return true;
36} 36}
37 37
38
39#ifdef CONFIG_COMPAT
40struct compat_ebt_mark_m_info {
41 compat_ulong_t mark, mask;
42 uint8_t invert, bitmask;
43};
44
45static void mark_mt_compat_from_user(void *dst, const void *src)
46{
47 const struct compat_ebt_mark_m_info *user = src;
48 struct ebt_mark_m_info *kern = dst;
49
50 kern->mark = user->mark;
51 kern->mask = user->mask;
52 kern->invert = user->invert;
53 kern->bitmask = user->bitmask;
54}
55
56static int mark_mt_compat_to_user(void __user *dst, const void *src)
57{
58 struct compat_ebt_mark_m_info __user *user = dst;
59 const struct ebt_mark_m_info *kern = src;
60
61 if (put_user(kern->mark, &user->mark) ||
62 put_user(kern->mask, &user->mask) ||
63 put_user(kern->invert, &user->invert) ||
64 put_user(kern->bitmask, &user->bitmask))
65 return -EFAULT;
66 return 0;
67}
68#endif
69
38static struct xt_match ebt_mark_mt_reg __read_mostly = { 70static struct xt_match ebt_mark_mt_reg __read_mostly = {
39 .name = "mark_m", 71 .name = "mark_m",
40 .revision = 0, 72 .revision = 0,
41 .family = NFPROTO_BRIDGE, 73 .family = NFPROTO_BRIDGE,
42 .match = ebt_mark_mt, 74 .match = ebt_mark_mt,
43 .checkentry = ebt_mark_mt_check, 75 .checkentry = ebt_mark_mt_check,
44 .matchsize = XT_ALIGN(sizeof(struct ebt_mark_m_info)), 76 .matchsize = sizeof(struct ebt_mark_m_info),
77#ifdef CONFIG_COMPAT
78 .compatsize = sizeof(struct compat_ebt_mark_m_info),
79 .compat_from_user = mark_mt_compat_from_user,
80 .compat_to_user = mark_mt_compat_to_user,
81#endif
45 .me = THIS_MODULE, 82 .me = THIS_MODULE,
46}; 83};
47 84
diff --git a/net/bridge/netfilter/ebt_nflog.c b/net/bridge/netfilter/ebt_nflog.c
index 2a63d996dd4e..40dbd248b9ae 100644
--- a/net/bridge/netfilter/ebt_nflog.c
+++ b/net/bridge/netfilter/ebt_nflog.c
@@ -51,7 +51,7 @@ static struct xt_target ebt_nflog_tg_reg __read_mostly = {
51 .family = NFPROTO_BRIDGE, 51 .family = NFPROTO_BRIDGE,
52 .target = ebt_nflog_tg, 52 .target = ebt_nflog_tg,
53 .checkentry = ebt_nflog_tg_check, 53 .checkentry = ebt_nflog_tg_check,
54 .targetsize = XT_ALIGN(sizeof(struct ebt_nflog_info)), 54 .targetsize = sizeof(struct ebt_nflog_info),
55 .me = THIS_MODULE, 55 .me = THIS_MODULE,
56}; 56};
57 57
diff --git a/net/bridge/netfilter/ebt_pkttype.c b/net/bridge/netfilter/ebt_pkttype.c
index 883e96e2a542..e2a07e6cbef3 100644
--- a/net/bridge/netfilter/ebt_pkttype.c
+++ b/net/bridge/netfilter/ebt_pkttype.c
@@ -36,7 +36,7 @@ static struct xt_match ebt_pkttype_mt_reg __read_mostly = {
36 .family = NFPROTO_BRIDGE, 36 .family = NFPROTO_BRIDGE,
37 .match = ebt_pkttype_mt, 37 .match = ebt_pkttype_mt,
38 .checkentry = ebt_pkttype_mt_check, 38 .checkentry = ebt_pkttype_mt_check,
39 .matchsize = XT_ALIGN(sizeof(struct ebt_pkttype_info)), 39 .matchsize = sizeof(struct ebt_pkttype_info),
40 .me = THIS_MODULE, 40 .me = THIS_MODULE,
41}; 41};
42 42
diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
index c8a49f7a57ba..9be8fbcd370b 100644
--- a/net/bridge/netfilter/ebt_redirect.c
+++ b/net/bridge/netfilter/ebt_redirect.c
@@ -59,7 +59,7 @@ static struct xt_target ebt_redirect_tg_reg __read_mostly = {
59 (1 << NF_BR_BROUTING), 59 (1 << NF_BR_BROUTING),
60 .target = ebt_redirect_tg, 60 .target = ebt_redirect_tg,
61 .checkentry = ebt_redirect_tg_check, 61 .checkentry = ebt_redirect_tg_check,
62 .targetsize = XT_ALIGN(sizeof(struct ebt_redirect_info)), 62 .targetsize = sizeof(struct ebt_redirect_info),
63 .me = THIS_MODULE, 63 .me = THIS_MODULE,
64}; 64};
65 65
diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c
index 8d04d4c302bd..9c7b520765a2 100644
--- a/net/bridge/netfilter/ebt_snat.c
+++ b/net/bridge/netfilter/ebt_snat.c
@@ -67,7 +67,7 @@ static struct xt_target ebt_snat_tg_reg __read_mostly = {
67 .hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_POST_ROUTING), 67 .hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_POST_ROUTING),
68 .target = ebt_snat_tg, 68 .target = ebt_snat_tg,
69 .checkentry = ebt_snat_tg_check, 69 .checkentry = ebt_snat_tg_check,
70 .targetsize = XT_ALIGN(sizeof(struct ebt_nat_info)), 70 .targetsize = sizeof(struct ebt_nat_info),
71 .me = THIS_MODULE, 71 .me = THIS_MODULE,
72}; 72};
73 73
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index 75e29a9cebda..92a93d363765 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -177,7 +177,7 @@ static struct xt_match ebt_stp_mt_reg __read_mostly = {
177 .family = NFPROTO_BRIDGE, 177 .family = NFPROTO_BRIDGE,
178 .match = ebt_stp_mt, 178 .match = ebt_stp_mt,
179 .checkentry = ebt_stp_mt_check, 179 .checkentry = ebt_stp_mt_check,
180 .matchsize = XT_ALIGN(sizeof(struct ebt_stp_info)), 180 .matchsize = sizeof(struct ebt_stp_info),
181 .me = THIS_MODULE, 181 .me = THIS_MODULE,
182}; 182};
183 183
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index ce50688a6431..c6ac657074a6 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -275,7 +275,7 @@ static struct xt_target ebt_ulog_tg_reg __read_mostly = {
275 .family = NFPROTO_BRIDGE, 275 .family = NFPROTO_BRIDGE,
276 .target = ebt_ulog_tg, 276 .target = ebt_ulog_tg,
277 .checkentry = ebt_ulog_tg_check, 277 .checkentry = ebt_ulog_tg_check,
278 .targetsize = XT_ALIGN(sizeof(struct ebt_ulog_info)), 278 .targetsize = sizeof(struct ebt_ulog_info),
279 .me = THIS_MODULE, 279 .me = THIS_MODULE,
280}; 280};
281 281
diff --git a/net/bridge/netfilter/ebt_vlan.c b/net/bridge/netfilter/ebt_vlan.c
index 3dddd489328e..be1dd2e1f615 100644
--- a/net/bridge/netfilter/ebt_vlan.c
+++ b/net/bridge/netfilter/ebt_vlan.c
@@ -163,7 +163,7 @@ static struct xt_match ebt_vlan_mt_reg __read_mostly = {
163 .family = NFPROTO_BRIDGE, 163 .family = NFPROTO_BRIDGE,
164 .match = ebt_vlan_mt, 164 .match = ebt_vlan_mt,
165 .checkentry = ebt_vlan_mt_check, 165 .checkentry = ebt_vlan_mt_check,
166 .matchsize = XT_ALIGN(sizeof(struct ebt_vlan_info)), 166 .matchsize = sizeof(struct ebt_vlan_info),
167 .me = THIS_MODULE, 167 .me = THIS_MODULE,
168}; 168};
169 169
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index d32ab13e728c..ae3f106c3908 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -71,7 +71,7 @@ static int __net_init broute_net_init(struct net *net)
71 71
72static void __net_exit broute_net_exit(struct net *net) 72static void __net_exit broute_net_exit(struct net *net)
73{ 73{
74 ebt_unregister_table(net->xt.broute_table); 74 ebt_unregister_table(net, net->xt.broute_table);
75} 75}
76 76
77static struct pernet_operations broute_net_ops = { 77static struct pernet_operations broute_net_ops = {
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index 60b1a6ca7185..42e6bd094574 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -107,7 +107,7 @@ static int __net_init frame_filter_net_init(struct net *net)
107 107
108static void __net_exit frame_filter_net_exit(struct net *net) 108static void __net_exit frame_filter_net_exit(struct net *net)
109{ 109{
110 ebt_unregister_table(net->xt.frame_filter); 110 ebt_unregister_table(net, net->xt.frame_filter);
111} 111}
112 112
113static struct pernet_operations frame_filter_net_ops = { 113static struct pernet_operations frame_filter_net_ops = {
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 4a98804203b0..6dc2f878ae05 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -107,7 +107,7 @@ static int __net_init frame_nat_net_init(struct net *net)
107 107
108static void __net_exit frame_nat_net_exit(struct net *net) 108static void __net_exit frame_nat_net_exit(struct net *net)
109{ 109{
110 ebt_unregister_table(net->xt.frame_nat); 110 ebt_unregister_table(net, net->xt.frame_nat);
111} 111}
112 112
113static struct pernet_operations frame_nat_net_ops = { 113static struct pernet_operations frame_nat_net_ops = {
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index bd1c65425d4f..dfb58056a89a 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -33,11 +33,6 @@
33#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\ 33#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
34 "report to author: "format, ## args) 34 "report to author: "format, ## args)
35/* #define BUGPRINT(format, args...) */ 35/* #define BUGPRINT(format, args...) */
36#define MEMPRINT(format, args...) printk("kernel msg: ebtables "\
37 ": out of memory: "format, ## args)
38/* #define MEMPRINT(format, args...) */
39
40
41 36
42/* 37/*
43 * Each cpu has its own set of counters, so there is no need for write_lock in 38 * Each cpu has its own set of counters, so there is no need for write_lock in
@@ -56,11 +51,37 @@
56 51
57static DEFINE_MUTEX(ebt_mutex); 52static DEFINE_MUTEX(ebt_mutex);
58 53
54#ifdef CONFIG_COMPAT
55static void ebt_standard_compat_from_user(void *dst, const void *src)
56{
57 int v = *(compat_int_t *)src;
58
59 if (v >= 0)
60 v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
61 memcpy(dst, &v, sizeof(v));
62}
63
64static int ebt_standard_compat_to_user(void __user *dst, const void *src)
65{
66 compat_int_t cv = *(int *)src;
67
68 if (cv >= 0)
69 cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
70 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
71}
72#endif
73
74
59static struct xt_target ebt_standard_target = { 75static struct xt_target ebt_standard_target = {
60 .name = "standard", 76 .name = "standard",
61 .revision = 0, 77 .revision = 0,
62 .family = NFPROTO_BRIDGE, 78 .family = NFPROTO_BRIDGE,
63 .targetsize = sizeof(int), 79 .targetsize = sizeof(int),
80#ifdef CONFIG_COMPAT
81 .compatsize = sizeof(compat_int_t),
82 .compat_from_user = ebt_standard_compat_from_user,
83 .compat_to_user = ebt_standard_compat_to_user,
84#endif
64}; 85};
65 86
66static inline int 87static inline int
@@ -82,7 +103,8 @@ static inline int ebt_do_match (struct ebt_entry_match *m,
82 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH; 103 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
83} 104}
84 105
85static inline int ebt_dev_check(char *entry, const struct net_device *device) 106static inline int
107ebt_dev_check(const char *entry, const struct net_device *device)
86{ 108{
87 int i = 0; 109 int i = 0;
88 const char *devname; 110 const char *devname;
@@ -100,8 +122,9 @@ static inline int ebt_dev_check(char *entry, const struct net_device *device)
100 122
101#define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg)) 123#define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
102/* process standard matches */ 124/* process standard matches */
103static inline int ebt_basic_match(struct ebt_entry *e, struct ethhdr *h, 125static inline int
104 const struct net_device *in, const struct net_device *out) 126ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h,
127 const struct net_device *in, const struct net_device *out)
105{ 128{
106 int verdict, i; 129 int verdict, i;
107 130
@@ -156,12 +179,12 @@ unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
156 int i, nentries; 179 int i, nentries;
157 struct ebt_entry *point; 180 struct ebt_entry *point;
158 struct ebt_counter *counter_base, *cb_base; 181 struct ebt_counter *counter_base, *cb_base;
159 struct ebt_entry_target *t; 182 const struct ebt_entry_target *t;
160 int verdict, sp = 0; 183 int verdict, sp = 0;
161 struct ebt_chainstack *cs; 184 struct ebt_chainstack *cs;
162 struct ebt_entries *chaininfo; 185 struct ebt_entries *chaininfo;
163 char *base; 186 const char *base;
164 struct ebt_table_info *private; 187 const struct ebt_table_info *private;
165 bool hotdrop = false; 188 bool hotdrop = false;
166 struct xt_match_param mtpar; 189 struct xt_match_param mtpar;
167 struct xt_target_param tgpar; 190 struct xt_target_param tgpar;
@@ -395,7 +418,7 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
395 return 0; 418 return 0;
396} 419}
397 420
398static int ebt_verify_pointers(struct ebt_replace *repl, 421static int ebt_verify_pointers(const struct ebt_replace *repl,
399 struct ebt_table_info *newinfo) 422 struct ebt_table_info *newinfo)
400{ 423{
401 unsigned int limit = repl->entries_size; 424 unsigned int limit = repl->entries_size;
@@ -442,6 +465,8 @@ static int ebt_verify_pointers(struct ebt_replace *repl,
442 break; 465 break;
443 if (left < e->next_offset) 466 if (left < e->next_offset)
444 break; 467 break;
468 if (e->next_offset < sizeof(struct ebt_entry))
469 return -EINVAL;
445 offset += e->next_offset; 470 offset += e->next_offset;
446 } 471 }
447 } 472 }
@@ -466,8 +491,8 @@ static int ebt_verify_pointers(struct ebt_replace *repl,
466 * to parse the userspace data 491 * to parse the userspace data
467 */ 492 */
468static inline int 493static inline int
469ebt_check_entry_size_and_hooks(struct ebt_entry *e, 494ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
470 struct ebt_table_info *newinfo, 495 const struct ebt_table_info *newinfo,
471 unsigned int *n, unsigned int *cnt, 496 unsigned int *n, unsigned int *cnt,
472 unsigned int *totalcnt, unsigned int *udc_cnt) 497 unsigned int *totalcnt, unsigned int *udc_cnt)
473{ 498{
@@ -561,13 +586,14 @@ ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
561} 586}
562 587
563static inline int 588static inline int
564ebt_cleanup_match(struct ebt_entry_match *m, unsigned int *i) 589ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
565{ 590{
566 struct xt_mtdtor_param par; 591 struct xt_mtdtor_param par;
567 592
568 if (i && (*i)-- == 0) 593 if (i && (*i)-- == 0)
569 return 1; 594 return 1;
570 595
596 par.net = net;
571 par.match = m->u.match; 597 par.match = m->u.match;
572 par.matchinfo = m->data; 598 par.matchinfo = m->data;
573 par.family = NFPROTO_BRIDGE; 599 par.family = NFPROTO_BRIDGE;
@@ -578,13 +604,14 @@ ebt_cleanup_match(struct ebt_entry_match *m, unsigned int *i)
578} 604}
579 605
580static inline int 606static inline int
581ebt_cleanup_watcher(struct ebt_entry_watcher *w, unsigned int *i) 607ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
582{ 608{
583 struct xt_tgdtor_param par; 609 struct xt_tgdtor_param par;
584 610
585 if (i && (*i)-- == 0) 611 if (i && (*i)-- == 0)
586 return 1; 612 return 1;
587 613
614 par.net = net;
588 par.target = w->u.watcher; 615 par.target = w->u.watcher;
589 par.targinfo = w->data; 616 par.targinfo = w->data;
590 par.family = NFPROTO_BRIDGE; 617 par.family = NFPROTO_BRIDGE;
@@ -595,7 +622,7 @@ ebt_cleanup_watcher(struct ebt_entry_watcher *w, unsigned int *i)
595} 622}
596 623
597static inline int 624static inline int
598ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt) 625ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
599{ 626{
600 struct xt_tgdtor_param par; 627 struct xt_tgdtor_param par;
601 struct ebt_entry_target *t; 628 struct ebt_entry_target *t;
@@ -605,10 +632,11 @@ ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt)
605 /* we're done */ 632 /* we're done */
606 if (cnt && (*cnt)-- == 0) 633 if (cnt && (*cnt)-- == 0)
607 return 1; 634 return 1;
608 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, NULL); 635 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
609 EBT_MATCH_ITERATE(e, ebt_cleanup_match, NULL); 636 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
610 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); 637 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
611 638
639 par.net = net;
612 par.target = t->u.target; 640 par.target = t->u.target;
613 par.targinfo = t->data; 641 par.targinfo = t->data;
614 par.family = NFPROTO_BRIDGE; 642 par.family = NFPROTO_BRIDGE;
@@ -619,7 +647,8 @@ ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt)
619} 647}
620 648
621static inline int 649static inline int
622ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo, 650ebt_check_entry(struct ebt_entry *e, struct net *net,
651 const struct ebt_table_info *newinfo,
623 const char *name, unsigned int *cnt, 652 const char *name, unsigned int *cnt,
624 struct ebt_cl_stack *cl_s, unsigned int udc_cnt) 653 struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
625{ 654{
@@ -671,6 +700,7 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo,
671 } 700 }
672 i = 0; 701 i = 0;
673 702
703 mtpar.net = tgpar.net = net;
674 mtpar.table = tgpar.table = name; 704 mtpar.table = tgpar.table = name;
675 mtpar.entryinfo = tgpar.entryinfo = e; 705 mtpar.entryinfo = tgpar.entryinfo = e;
676 mtpar.hook_mask = tgpar.hook_mask = hookmask; 706 mtpar.hook_mask = tgpar.hook_mask = hookmask;
@@ -726,9 +756,9 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo,
726 (*cnt)++; 756 (*cnt)++;
727 return 0; 757 return 0;
728cleanup_watchers: 758cleanup_watchers:
729 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, &j); 759 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
730cleanup_matches: 760cleanup_matches:
731 EBT_MATCH_ITERATE(e, ebt_cleanup_match, &i); 761 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
732 return ret; 762 return ret;
733} 763}
734 764
@@ -737,12 +767,12 @@ cleanup_matches:
737 * the hook mask for udc tells us from which base chains the udc can be 767 * the hook mask for udc tells us from which base chains the udc can be
738 * accessed. This mask is a parameter to the check() functions of the extensions 768 * accessed. This mask is a parameter to the check() functions of the extensions
739 */ 769 */
740static int check_chainloops(struct ebt_entries *chain, struct ebt_cl_stack *cl_s, 770static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
741 unsigned int udc_cnt, unsigned int hooknr, char *base) 771 unsigned int udc_cnt, unsigned int hooknr, char *base)
742{ 772{
743 int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict; 773 int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
744 struct ebt_entry *e = (struct ebt_entry *)chain->data; 774 const struct ebt_entry *e = (struct ebt_entry *)chain->data;
745 struct ebt_entry_target *t; 775 const struct ebt_entry_target *t;
746 776
747 while (pos < nentries || chain_nr != -1) { 777 while (pos < nentries || chain_nr != -1) {
748 /* end of udc, go back one 'recursion' step */ 778 /* end of udc, go back one 'recursion' step */
@@ -808,7 +838,8 @@ letscontinue:
808} 838}
809 839
810/* do the parsing of the table/chains/entries/matches/watchers/targets, heh */ 840/* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
811static int translate_table(char *name, struct ebt_table_info *newinfo) 841static int translate_table(struct net *net, const char *name,
842 struct ebt_table_info *newinfo)
812{ 843{
813 unsigned int i, j, k, udc_cnt; 844 unsigned int i, j, k, udc_cnt;
814 int ret; 845 int ret;
@@ -917,17 +948,17 @@ static int translate_table(char *name, struct ebt_table_info *newinfo)
917 /* used to know what we need to clean up if something goes wrong */ 948 /* used to know what we need to clean up if something goes wrong */
918 i = 0; 949 i = 0;
919 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 950 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
920 ebt_check_entry, newinfo, name, &i, cl_s, udc_cnt); 951 ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
921 if (ret != 0) { 952 if (ret != 0) {
922 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 953 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
923 ebt_cleanup_entry, &i); 954 ebt_cleanup_entry, net, &i);
924 } 955 }
925 vfree(cl_s); 956 vfree(cl_s);
926 return ret; 957 return ret;
927} 958}
928 959
929/* called under write_lock */ 960/* called under write_lock */
930static void get_counters(struct ebt_counter *oldcounters, 961static void get_counters(const struct ebt_counter *oldcounters,
931 struct ebt_counter *counters, unsigned int nentries) 962 struct ebt_counter *counters, unsigned int nentries)
932{ 963{
933 int i, cpu; 964 int i, cpu;
@@ -949,90 +980,45 @@ static void get_counters(struct ebt_counter *oldcounters,
949 } 980 }
950} 981}
951 982
952/* replace the table */ 983static int do_replace_finish(struct net *net, struct ebt_replace *repl,
953static int do_replace(struct net *net, void __user *user, unsigned int len) 984 struct ebt_table_info *newinfo)
954{ 985{
955 int ret, i, countersize; 986 int ret, i;
956 struct ebt_table_info *newinfo;
957 struct ebt_replace tmp;
958 struct ebt_table *t;
959 struct ebt_counter *counterstmp = NULL; 987 struct ebt_counter *counterstmp = NULL;
960 /* used to be able to unlock earlier */ 988 /* used to be able to unlock earlier */
961 struct ebt_table_info *table; 989 struct ebt_table_info *table;
962 990 struct ebt_table *t;
963 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
964 return -EFAULT;
965
966 if (len != sizeof(tmp) + tmp.entries_size) {
967 BUGPRINT("Wrong len argument\n");
968 return -EINVAL;
969 }
970
971 if (tmp.entries_size == 0) {
972 BUGPRINT("Entries_size never zero\n");
973 return -EINVAL;
974 }
975 /* overflow check */
976 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS -
977 SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
978 return -ENOMEM;
979 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
980 return -ENOMEM;
981
982 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
983 newinfo = vmalloc(sizeof(*newinfo) + countersize);
984 if (!newinfo)
985 return -ENOMEM;
986
987 if (countersize)
988 memset(newinfo->counters, 0, countersize);
989
990 newinfo->entries = vmalloc(tmp.entries_size);
991 if (!newinfo->entries) {
992 ret = -ENOMEM;
993 goto free_newinfo;
994 }
995 if (copy_from_user(
996 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
997 BUGPRINT("Couldn't copy entries from userspace\n");
998 ret = -EFAULT;
999 goto free_entries;
1000 }
1001 991
1002 /* the user wants counters back 992 /* the user wants counters back
1003 the check on the size is done later, when we have the lock */ 993 the check on the size is done later, when we have the lock */
1004 if (tmp.num_counters) { 994 if (repl->num_counters) {
1005 counterstmp = vmalloc(tmp.num_counters * sizeof(*counterstmp)); 995 unsigned long size = repl->num_counters * sizeof(*counterstmp);
1006 if (!counterstmp) { 996 counterstmp = vmalloc(size);
1007 ret = -ENOMEM; 997 if (!counterstmp)
1008 goto free_entries; 998 return -ENOMEM;
1009 }
1010 } 999 }
1011 else
1012 counterstmp = NULL;
1013 1000
1014 /* this can get initialized by translate_table() */
1015 newinfo->chainstack = NULL; 1001 newinfo->chainstack = NULL;
1016 ret = ebt_verify_pointers(&tmp, newinfo); 1002 ret = ebt_verify_pointers(repl, newinfo);
1017 if (ret != 0) 1003 if (ret != 0)
1018 goto free_counterstmp; 1004 goto free_counterstmp;
1019 1005
1020 ret = translate_table(tmp.name, newinfo); 1006 ret = translate_table(net, repl->name, newinfo);
1021 1007
1022 if (ret != 0) 1008 if (ret != 0)
1023 goto free_counterstmp; 1009 goto free_counterstmp;
1024 1010
1025 t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); 1011 t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
1026 if (!t) { 1012 if (!t) {
1027 ret = -ENOENT; 1013 ret = -ENOENT;
1028 goto free_iterate; 1014 goto free_iterate;
1029 } 1015 }
1030 1016
1031 /* the table doesn't like it */ 1017 /* the table doesn't like it */
1032 if (t->check && (ret = t->check(newinfo, tmp.valid_hooks))) 1018 if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
1033 goto free_unlock; 1019 goto free_unlock;
1034 1020
1035 if (tmp.num_counters && tmp.num_counters != t->private->nentries) { 1021 if (repl->num_counters && repl->num_counters != t->private->nentries) {
1036 BUGPRINT("Wrong nr. of counters requested\n"); 1022 BUGPRINT("Wrong nr. of counters requested\n");
1037 ret = -EINVAL; 1023 ret = -EINVAL;
1038 goto free_unlock; 1024 goto free_unlock;
@@ -1048,7 +1034,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
1048 module_put(t->me); 1034 module_put(t->me);
1049 /* we need an atomic snapshot of the counters */ 1035 /* we need an atomic snapshot of the counters */
1050 write_lock_bh(&t->lock); 1036 write_lock_bh(&t->lock);
1051 if (tmp.num_counters) 1037 if (repl->num_counters)
1052 get_counters(t->private->counters, counterstmp, 1038 get_counters(t->private->counters, counterstmp,
1053 t->private->nentries); 1039 t->private->nentries);
1054 1040
@@ -1059,10 +1045,9 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
1059 allocation. Only reason why this is done is because this way the lock 1045 allocation. Only reason why this is done is because this way the lock
1060 is held only once, while this doesn't bring the kernel into a 1046 is held only once, while this doesn't bring the kernel into a
1061 dangerous state. */ 1047 dangerous state. */
1062 if (tmp.num_counters && 1048 if (repl->num_counters &&
1063 copy_to_user(tmp.counters, counterstmp, 1049 copy_to_user(repl->counters, counterstmp,
1064 tmp.num_counters * sizeof(struct ebt_counter))) { 1050 repl->num_counters * sizeof(struct ebt_counter))) {
1065 BUGPRINT("Couldn't copy counters to userspace\n");
1066 ret = -EFAULT; 1051 ret = -EFAULT;
1067 } 1052 }
1068 else 1053 else
@@ -1070,7 +1055,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
1070 1055
1071 /* decrease module count and free resources */ 1056 /* decrease module count and free resources */
1072 EBT_ENTRY_ITERATE(table->entries, table->entries_size, 1057 EBT_ENTRY_ITERATE(table->entries, table->entries_size,
1073 ebt_cleanup_entry, NULL); 1058 ebt_cleanup_entry, net, NULL);
1074 1059
1075 vfree(table->entries); 1060 vfree(table->entries);
1076 if (table->chainstack) { 1061 if (table->chainstack) {
@@ -1087,7 +1072,7 @@ free_unlock:
1087 mutex_unlock(&ebt_mutex); 1072 mutex_unlock(&ebt_mutex);
1088free_iterate: 1073free_iterate:
1089 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 1074 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1090 ebt_cleanup_entry, NULL); 1075 ebt_cleanup_entry, net, NULL);
1091free_counterstmp: 1076free_counterstmp:
1092 vfree(counterstmp); 1077 vfree(counterstmp);
1093 /* can be initialized in translate_table() */ 1078 /* can be initialized in translate_table() */
@@ -1096,6 +1081,59 @@ free_counterstmp:
1096 vfree(newinfo->chainstack[i]); 1081 vfree(newinfo->chainstack[i]);
1097 vfree(newinfo->chainstack); 1082 vfree(newinfo->chainstack);
1098 } 1083 }
1084 return ret;
1085}
1086
1087/* replace the table */
1088static int do_replace(struct net *net, const void __user *user,
1089 unsigned int len)
1090{
1091 int ret, countersize;
1092 struct ebt_table_info *newinfo;
1093 struct ebt_replace tmp;
1094
1095 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1096 return -EFAULT;
1097
1098 if (len != sizeof(tmp) + tmp.entries_size) {
1099 BUGPRINT("Wrong len argument\n");
1100 return -EINVAL;
1101 }
1102
1103 if (tmp.entries_size == 0) {
1104 BUGPRINT("Entries_size never zero\n");
1105 return -EINVAL;
1106 }
1107 /* overflow check */
1108 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1109 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1110 return -ENOMEM;
1111 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1112 return -ENOMEM;
1113
1114 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1115 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1116 if (!newinfo)
1117 return -ENOMEM;
1118
1119 if (countersize)
1120 memset(newinfo->counters, 0, countersize);
1121
1122 newinfo->entries = vmalloc(tmp.entries_size);
1123 if (!newinfo->entries) {
1124 ret = -ENOMEM;
1125 goto free_newinfo;
1126 }
1127 if (copy_from_user(
1128 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1129 BUGPRINT("Couldn't copy entries from userspace\n");
1130 ret = -EFAULT;
1131 goto free_entries;
1132 }
1133
1134 ret = do_replace_finish(net, &tmp, newinfo);
1135 if (ret == 0)
1136 return ret;
1099free_entries: 1137free_entries:
1100 vfree(newinfo->entries); 1138 vfree(newinfo->entries);
1101free_newinfo: 1139free_newinfo:
@@ -1154,7 +1192,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table)
1154 newinfo->hook_entry[i] = p + 1192 newinfo->hook_entry[i] = p +
1155 ((char *)repl->hook_entry[i] - repl->entries); 1193 ((char *)repl->hook_entry[i] - repl->entries);
1156 } 1194 }
1157 ret = translate_table(repl->name, newinfo); 1195 ret = translate_table(net, repl->name, newinfo);
1158 if (ret != 0) { 1196 if (ret != 0) {
1159 BUGPRINT("Translate_table failed\n"); 1197 BUGPRINT("Translate_table failed\n");
1160 goto free_chainstack; 1198 goto free_chainstack;
@@ -1204,7 +1242,7 @@ out:
1204 return ERR_PTR(ret); 1242 return ERR_PTR(ret);
1205} 1243}
1206 1244
1207void ebt_unregister_table(struct ebt_table *table) 1245void ebt_unregister_table(struct net *net, struct ebt_table *table)
1208{ 1246{
1209 int i; 1247 int i;
1210 1248
@@ -1216,7 +1254,7 @@ void ebt_unregister_table(struct ebt_table *table)
1216 list_del(&table->list); 1254 list_del(&table->list);
1217 mutex_unlock(&ebt_mutex); 1255 mutex_unlock(&ebt_mutex);
1218 EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size, 1256 EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1219 ebt_cleanup_entry, NULL); 1257 ebt_cleanup_entry, net, NULL);
1220 if (table->private->nentries) 1258 if (table->private->nentries)
1221 module_put(table->me); 1259 module_put(table->me);
1222 vfree(table->private->entries); 1260 vfree(table->private->entries);
@@ -1230,39 +1268,33 @@ void ebt_unregister_table(struct ebt_table *table)
1230} 1268}
1231 1269
1232/* userspace just supplied us with counters */ 1270/* userspace just supplied us with counters */
1233static int update_counters(struct net *net, void __user *user, unsigned int len) 1271static int do_update_counters(struct net *net, const char *name,
1272 struct ebt_counter __user *counters,
1273 unsigned int num_counters,
1274 const void __user *user, unsigned int len)
1234{ 1275{
1235 int i, ret; 1276 int i, ret;
1236 struct ebt_counter *tmp; 1277 struct ebt_counter *tmp;
1237 struct ebt_replace hlp;
1238 struct ebt_table *t; 1278 struct ebt_table *t;
1239 1279
1240 if (copy_from_user(&hlp, user, sizeof(hlp))) 1280 if (num_counters == 0)
1241 return -EFAULT;
1242
1243 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1244 return -EINVAL;
1245 if (hlp.num_counters == 0)
1246 return -EINVAL; 1281 return -EINVAL;
1247 1282
1248 if (!(tmp = vmalloc(hlp.num_counters * sizeof(*tmp)))) { 1283 tmp = vmalloc(num_counters * sizeof(*tmp));
1249 MEMPRINT("Update_counters && nomemory\n"); 1284 if (!tmp)
1250 return -ENOMEM; 1285 return -ENOMEM;
1251 }
1252 1286
1253 t = find_table_lock(net, hlp.name, &ret, &ebt_mutex); 1287 t = find_table_lock(net, name, &ret, &ebt_mutex);
1254 if (!t) 1288 if (!t)
1255 goto free_tmp; 1289 goto free_tmp;
1256 1290
1257 if (hlp.num_counters != t->private->nentries) { 1291 if (num_counters != t->private->nentries) {
1258 BUGPRINT("Wrong nr of counters\n"); 1292 BUGPRINT("Wrong nr of counters\n");
1259 ret = -EINVAL; 1293 ret = -EINVAL;
1260 goto unlock_mutex; 1294 goto unlock_mutex;
1261 } 1295 }
1262 1296
1263 if ( copy_from_user(tmp, hlp.counters, 1297 if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1264 hlp.num_counters * sizeof(struct ebt_counter)) ) {
1265 BUGPRINT("Updata_counters && !cfu\n");
1266 ret = -EFAULT; 1298 ret = -EFAULT;
1267 goto unlock_mutex; 1299 goto unlock_mutex;
1268 } 1300 }
@@ -1271,7 +1303,7 @@ static int update_counters(struct net *net, void __user *user, unsigned int len)
1271 write_lock_bh(&t->lock); 1303 write_lock_bh(&t->lock);
1272 1304
1273 /* we add to the counters of the first cpu */ 1305 /* we add to the counters of the first cpu */
1274 for (i = 0; i < hlp.num_counters; i++) { 1306 for (i = 0; i < num_counters; i++) {
1275 t->private->counters[i].pcnt += tmp[i].pcnt; 1307 t->private->counters[i].pcnt += tmp[i].pcnt;
1276 t->private->counters[i].bcnt += tmp[i].bcnt; 1308 t->private->counters[i].bcnt += tmp[i].bcnt;
1277 } 1309 }
@@ -1285,8 +1317,23 @@ free_tmp:
1285 return ret; 1317 return ret;
1286} 1318}
1287 1319
1288static inline int ebt_make_matchname(struct ebt_entry_match *m, 1320static int update_counters(struct net *net, const void __user *user,
1289 char *base, char __user *ubase) 1321 unsigned int len)
1322{
1323 struct ebt_replace hlp;
1324
1325 if (copy_from_user(&hlp, user, sizeof(hlp)))
1326 return -EFAULT;
1327
1328 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1329 return -EINVAL;
1330
1331 return do_update_counters(net, hlp.name, hlp.counters,
1332 hlp.num_counters, user, len);
1333}
1334
1335static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1336 const char *base, char __user *ubase)
1290{ 1337{
1291 char __user *hlp = ubase + ((char *)m - base); 1338 char __user *hlp = ubase + ((char *)m - base);
1292 if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN)) 1339 if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
@@ -1294,8 +1341,8 @@ static inline int ebt_make_matchname(struct ebt_entry_match *m,
1294 return 0; 1341 return 0;
1295} 1342}
1296 1343
1297static inline int ebt_make_watchername(struct ebt_entry_watcher *w, 1344static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1298 char *base, char __user *ubase) 1345 const char *base, char __user *ubase)
1299{ 1346{
1300 char __user *hlp = ubase + ((char *)w - base); 1347 char __user *hlp = ubase + ((char *)w - base);
1301 if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN)) 1348 if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
@@ -1303,11 +1350,12 @@ static inline int ebt_make_watchername(struct ebt_entry_watcher *w,
1303 return 0; 1350 return 0;
1304} 1351}
1305 1352
1306static inline int ebt_make_names(struct ebt_entry *e, char *base, char __user *ubase) 1353static inline int
1354ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1307{ 1355{
1308 int ret; 1356 int ret;
1309 char __user *hlp; 1357 char __user *hlp;
1310 struct ebt_entry_target *t; 1358 const struct ebt_entry_target *t;
1311 1359
1312 if (e->bitmask == 0) 1360 if (e->bitmask == 0)
1313 return 0; 1361 return 0;
@@ -1326,13 +1374,46 @@ static inline int ebt_make_names(struct ebt_entry *e, char *base, char __user *u
1326 return 0; 1374 return 0;
1327} 1375}
1328 1376
1377static int copy_counters_to_user(struct ebt_table *t,
1378 const struct ebt_counter *oldcounters,
1379 void __user *user, unsigned int num_counters,
1380 unsigned int nentries)
1381{
1382 struct ebt_counter *counterstmp;
1383 int ret = 0;
1384
1385 /* userspace might not need the counters */
1386 if (num_counters == 0)
1387 return 0;
1388
1389 if (num_counters != nentries) {
1390 BUGPRINT("Num_counters wrong\n");
1391 return -EINVAL;
1392 }
1393
1394 counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1395 if (!counterstmp)
1396 return -ENOMEM;
1397
1398 write_lock_bh(&t->lock);
1399 get_counters(oldcounters, counterstmp, nentries);
1400 write_unlock_bh(&t->lock);
1401
1402 if (copy_to_user(user, counterstmp,
1403 nentries * sizeof(struct ebt_counter)))
1404 ret = -EFAULT;
1405 vfree(counterstmp);
1406 return ret;
1407}
1408
1329/* called with ebt_mutex locked */ 1409/* called with ebt_mutex locked */
1330static int copy_everything_to_user(struct ebt_table *t, void __user *user, 1410static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1331 int *len, int cmd) 1411 const int *len, int cmd)
1332{ 1412{
1333 struct ebt_replace tmp; 1413 struct ebt_replace tmp;
1334 struct ebt_counter *counterstmp, *oldcounters; 1414 const struct ebt_counter *oldcounters;
1335 unsigned int entries_size, nentries; 1415 unsigned int entries_size, nentries;
1416 int ret;
1336 char *entries; 1417 char *entries;
1337 1418
1338 if (cmd == EBT_SO_GET_ENTRIES) { 1419 if (cmd == EBT_SO_GET_ENTRIES) {
@@ -1347,16 +1428,12 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1347 oldcounters = t->table->counters; 1428 oldcounters = t->table->counters;
1348 } 1429 }
1349 1430
1350 if (copy_from_user(&tmp, user, sizeof(tmp))) { 1431 if (copy_from_user(&tmp, user, sizeof(tmp)))
1351 BUGPRINT("Cfu didn't work\n");
1352 return -EFAULT; 1432 return -EFAULT;
1353 }
1354 1433
1355 if (*len != sizeof(struct ebt_replace) + entries_size + 1434 if (*len != sizeof(struct ebt_replace) + entries_size +
1356 (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0)) { 1435 (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0))
1357 BUGPRINT("Wrong size\n");
1358 return -EINVAL; 1436 return -EINVAL;
1359 }
1360 1437
1361 if (tmp.nentries != nentries) { 1438 if (tmp.nentries != nentries) {
1362 BUGPRINT("Nentries wrong\n"); 1439 BUGPRINT("Nentries wrong\n");
@@ -1368,29 +1445,10 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1368 return -EINVAL; 1445 return -EINVAL;
1369 } 1446 }
1370 1447
1371 /* userspace might not need the counters */ 1448 ret = copy_counters_to_user(t, oldcounters, tmp.counters,
1372 if (tmp.num_counters) { 1449 tmp.num_counters, nentries);
1373 if (tmp.num_counters != nentries) { 1450 if (ret)
1374 BUGPRINT("Num_counters wrong\n"); 1451 return ret;
1375 return -EINVAL;
1376 }
1377 counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1378 if (!counterstmp) {
1379 MEMPRINT("Couldn't copy counters, out of memory\n");
1380 return -ENOMEM;
1381 }
1382 write_lock_bh(&t->lock);
1383 get_counters(oldcounters, counterstmp, nentries);
1384 write_unlock_bh(&t->lock);
1385
1386 if (copy_to_user(tmp.counters, counterstmp,
1387 nentries * sizeof(struct ebt_counter))) {
1388 BUGPRINT("Couldn't copy counters to userspace\n");
1389 vfree(counterstmp);
1390 return -EFAULT;
1391 }
1392 vfree(counterstmp);
1393 }
1394 1452
1395 if (copy_to_user(tmp.entries, entries, entries_size)) { 1453 if (copy_to_user(tmp.entries, entries, entries_size)) {
1396 BUGPRINT("Couldn't copy entries to userspace\n"); 1454 BUGPRINT("Couldn't copy entries to userspace\n");
@@ -1406,6 +1464,9 @@ static int do_ebt_set_ctl(struct sock *sk,
1406{ 1464{
1407 int ret; 1465 int ret;
1408 1466
1467 if (!capable(CAP_NET_ADMIN))
1468 return -EPERM;
1469
1409 switch(cmd) { 1470 switch(cmd) {
1410 case EBT_SO_SET_ENTRIES: 1471 case EBT_SO_SET_ENTRIES:
1411 ret = do_replace(sock_net(sk), user, len); 1472 ret = do_replace(sock_net(sk), user, len);
@@ -1415,7 +1476,7 @@ static int do_ebt_set_ctl(struct sock *sk,
1415 break; 1476 break;
1416 default: 1477 default:
1417 ret = -EINVAL; 1478 ret = -EINVAL;
1418 } 1479 }
1419 return ret; 1480 return ret;
1420} 1481}
1421 1482
@@ -1425,6 +1486,9 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1425 struct ebt_replace tmp; 1486 struct ebt_replace tmp;
1426 struct ebt_table *t; 1487 struct ebt_table *t;
1427 1488
1489 if (!capable(CAP_NET_ADMIN))
1490 return -EPERM;
1491
1428 if (copy_from_user(&tmp, user, sizeof(tmp))) 1492 if (copy_from_user(&tmp, user, sizeof(tmp)))
1429 return -EFAULT; 1493 return -EFAULT;
1430 1494
@@ -1472,15 +1536,892 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1472 return ret; 1536 return ret;
1473} 1537}
1474 1538
1539#ifdef CONFIG_COMPAT
1540/* 32 bit-userspace compatibility definitions. */
1541struct compat_ebt_replace {
1542 char name[EBT_TABLE_MAXNAMELEN];
1543 compat_uint_t valid_hooks;
1544 compat_uint_t nentries;
1545 compat_uint_t entries_size;
1546 /* start of the chains */
1547 compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
1548 /* nr of counters userspace expects back */
1549 compat_uint_t num_counters;
1550 /* where the kernel will put the old counters. */
1551 compat_uptr_t counters;
1552 compat_uptr_t entries;
1553};
1554
1555/* struct ebt_entry_match, _target and _watcher have same layout */
1556struct compat_ebt_entry_mwt {
1557 union {
1558 char name[EBT_FUNCTION_MAXNAMELEN];
1559 compat_uptr_t ptr;
1560 } u;
1561 compat_uint_t match_size;
1562 compat_uint_t data[0];
1563};
1564
1565/* account for possible padding between match_size and ->data */
1566static int ebt_compat_entry_padsize(void)
1567{
1568 BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
1569 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
1570 return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
1571 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
1572}
1573
1574static int ebt_compat_match_offset(const struct xt_match *match,
1575 unsigned int userlen)
1576{
1577 /*
1578 * ebt_among needs special handling. The kernel .matchsize is
1579 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1580 * value is expected.
1581 * Example: userspace sends 4500, ebt_among.c wants 4504.
1582 */
1583 if (unlikely(match->matchsize == -1))
1584 return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1585 return xt_compat_match_offset(match);
1586}
1587
1588static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1589 unsigned int *size)
1590{
1591 const struct xt_match *match = m->u.match;
1592 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1593 int off = ebt_compat_match_offset(match, m->match_size);
1594 compat_uint_t msize = m->match_size - off;
1595
1596 BUG_ON(off >= m->match_size);
1597
1598 if (copy_to_user(cm->u.name, match->name,
1599 strlen(match->name) + 1) || put_user(msize, &cm->match_size))
1600 return -EFAULT;
1601
1602 if (match->compat_to_user) {
1603 if (match->compat_to_user(cm->data, m->data))
1604 return -EFAULT;
1605 } else if (copy_to_user(cm->data, m->data, msize))
1606 return -EFAULT;
1607
1608 *size -= ebt_compat_entry_padsize() + off;
1609 *dstptr = cm->data;
1610 *dstptr += msize;
1611 return 0;
1612}
1613
1614static int compat_target_to_user(struct ebt_entry_target *t,
1615 void __user **dstptr,
1616 unsigned int *size)
1617{
1618 const struct xt_target *target = t->u.target;
1619 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1620 int off = xt_compat_target_offset(target);
1621 compat_uint_t tsize = t->target_size - off;
1622
1623 BUG_ON(off >= t->target_size);
1624
1625 if (copy_to_user(cm->u.name, target->name,
1626 strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
1627 return -EFAULT;
1628
1629 if (target->compat_to_user) {
1630 if (target->compat_to_user(cm->data, t->data))
1631 return -EFAULT;
1632 } else if (copy_to_user(cm->data, t->data, tsize))
1633 return -EFAULT;
1634
1635 *size -= ebt_compat_entry_padsize() + off;
1636 *dstptr = cm->data;
1637 *dstptr += tsize;
1638 return 0;
1639}
1640
1641static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1642 void __user **dstptr,
1643 unsigned int *size)
1644{
1645 return compat_target_to_user((struct ebt_entry_target *)w,
1646 dstptr, size);
1647}
1648
/*
 * Copy one native (64-bit) ebt_entry out to a 32-bit userspace buffer.
 * *dstptr is the userspace write cursor and *size the room remaining;
 * both are advanced/consumed as data is emitted.  Returns 0 or -errno.
 */
static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
				     unsigned int *size)
{
	struct ebt_entry_target *t;
	struct ebt_entry __user *ce;
	u32 watchers_offset, target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	if (e->bitmask == 0) {
		/* bitmask == 0 marks a chain header (struct ebt_entries);
		 * its layout is the same in both ABIs, copy verbatim. */
		if (*size < sizeof(struct ebt_entries))
			return -EINVAL;
		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
			return -EFAULT;

		*dstptr += sizeof(struct ebt_entries);
		*size -= sizeof(struct ebt_entries);
		return 0;
	}

	if (*size < sizeof(*ce))
		return -EINVAL;

	ce = (struct ebt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(*ce)))
		return -EFAULT;

	origsize = *size;
	*dstptr += sizeof(*ce);

	/* extensions shrink when converted to compat layout, so the
	 * entry-relative offsets must be recomputed from how many bytes
	 * of *size each stage actually consumed. */
	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
	if (ret)
		return ret;
	watchers_offset = e->watchers_offset - (origsize - *size);

	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
	if (ret)
		return ret;
	target_offset = e->target_offset - (origsize - *size);

	t = (struct ebt_entry_target *) ((char *) e + e->target_offset);

	ret = compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);

	/* patch the recomputed offsets into the header copied earlier */
	if (put_user(watchers_offset, &ce->watchers_offset) ||
	    put_user(target_offset, &ce->target_offset) ||
	    put_user(next_offset, &ce->next_offset))
		return -EFAULT;

	*size -= sizeof(*ce);
	return 0;
}
1704
1705static int compat_calc_match(struct ebt_entry_match *m, int *off)
1706{
1707 *off += ebt_compat_match_offset(m->u.match, m->match_size);
1708 *off += ebt_compat_entry_padsize();
1709 return 0;
1710}
1711
1712static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1713{
1714 *off += xt_compat_target_offset(w->u.watcher);
1715 *off += ebt_compat_entry_padsize();
1716 return 0;
1717}
1718
/*
 * Compute how much smaller one entry becomes in compat (32-bit) layout,
 * record the per-entry delta with the xt compat framework, and shift any
 * affected hook entry-point offsets in 'newinfo'.
 */
static int compat_calc_entry(const struct ebt_entry *e,
			     const struct ebt_table_info *info,
			     const void *base,
			     struct compat_ebt_replace *newinfo)
{
	const struct ebt_entry_target *t;
	unsigned int entry_offset;
	int off, ret, i;

	if (e->bitmask == 0)
		return 0;	/* chain header: same size in both ABIs */

	off = 0;
	entry_offset = (void *)e - base;

	/* accumulate per-extension size deltas; the callbacks only add
	 * to 'off' and always return 0, so returns are ignored */
	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);

	t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);

	off += xt_compat_target_offset(t->u.target);
	off += ebt_compat_entry_padsize();

	newinfo->entries_size -= off;

	/* remember offset -> delta mapping for later jump fixups */
	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		const void *hookptr = info->hook_entry[i];
		/* NOTE(review): 'base - hookptr' turns a pointer difference
		 * back into a pointer before the comparison; presumably this
		 * tests whether the hook's entry point lies beyond this
		 * entry — confirm intent before touching. */
		if (info->hook_entry[i] &&
		    (e < (struct ebt_entry *)(base - hookptr))) {
			newinfo->hook_entry[i] -= off;
			pr_debug("0x%08X -> 0x%08X\n",
				newinfo->hook_entry[i] + off,
				newinfo->hook_entry[i]);
		}
	}

	return 0;
}
1761
1762
1763static int compat_table_info(const struct ebt_table_info *info,
1764 struct compat_ebt_replace *newinfo)
1765{
1766 unsigned int size = info->entries_size;
1767 const void *entries = info->entries;
1768
1769 newinfo->entries_size = size;
1770
1771 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1772 entries, newinfo);
1773}
1774
/*
 * Compat counterpart of copy_everything_to_user(): return the live table
 * (EBT_SO_GET_ENTRIES) or its registration-time template to a 32-bit
 * process, converting every entry to compat layout on the way out.
 */
static int compat_copy_everything_to_user(struct ebt_table *t,
					 void __user *user, int *len, int cmd)
{
	struct compat_ebt_replace repl, tmp;
	struct ebt_counter *oldcounters;
	struct ebt_table_info tinfo;
	int ret;
	void __user *pos;

	memset(&tinfo, 0, sizeof(tinfo));

	if (cmd == EBT_SO_GET_ENTRIES) {
		/* live ruleset */
		tinfo.entries_size = t->private->entries_size;
		tinfo.nentries = t->private->nentries;
		tinfo.entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		/* pristine template the table was registered with */
		tinfo.entries_size = t->table->entries_size;
		tinfo.nentries = t->table->nentries;
		tinfo.entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (tmp.nentries != tinfo.nentries ||
	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))
		return -EINVAL;

	memcpy(&repl, &tmp, sizeof(repl));
	/* shrink repl.entries_size to the compat size of the same data */
	if (cmd == EBT_SO_GET_ENTRIES)
		ret = compat_table_info(t->private, &repl);
	else
		ret = compat_table_info(&tinfo, &repl);
	if (ret)
		return ret;

	/* caller's buffer must match header + compat entries (+ counters) */
	if (*len != sizeof(tmp) + repl.entries_size +
	   (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
				*len, tinfo.entries_size, repl.entries_size);
		return -EINVAL;
	}

	/* userspace might not need the counters */
	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
					tmp.num_counters, tinfo.nentries);
	if (ret)
		return ret;

	pos = compat_ptr(tmp.entries);
	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
			compat_copy_entry_to_user, &pos, &tmp.entries_size);
}
1830
/* bookkeeping for the two-pass compat(32-bit) -> native(64-bit) entry
 * translation; with buf_kern_start == NULL only sizes are accumulated */
struct ebt_entries_buf_state {
	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
	u32 buf_kern_len;	/* total size of kernel buffer */
	u32 buf_kern_offset;	/* amount of data copied so far */
	u32 buf_user_offset;	/* read position in userspace buffer */
};
1837
1838static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1839{
1840 state->buf_kern_offset += sz;
1841 return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1842}
1843
1844static int ebt_buf_add(struct ebt_entries_buf_state *state,
1845 void *data, unsigned int sz)
1846{
1847 if (state->buf_kern_start == NULL)
1848 goto count_only;
1849
1850 BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
1851
1852 memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1853
1854 count_only:
1855 state->buf_user_offset += sz;
1856 return ebt_buf_count(state, sz);
1857}
1858
1859static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1860{
1861 char *b = state->buf_kern_start;
1862
1863 BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
1864
1865 if (b != NULL && sz > 0)
1866 memset(b + state->buf_kern_offset, 0, sz);
1867 /* do not adjust ->buf_user_offset here, we added kernel-side padding */
1868 return ebt_buf_count(state, sz);
1869}
1870
/* which kind of extension a compat_ebt_entry_mwt blob describes */
enum compat_mwt {
	EBT_COMPAT_MATCH,
	EBT_COMPAT_WATCHER,
	EBT_COMPAT_TARGET,
};
1876
/*
 * Translate one compat match/watcher/target blob into its native kernel
 * form at the current buffer cursor — or, on the sizing pass (when
 * state->buf_kern_start is NULL), just record the size delta with the
 * xt compat framework.  Returns the compat bytes consumed plus the
 * kernel-side growth, or a negative errno.
 */
static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
				enum compat_mwt compat_mwt,
				struct ebt_entries_buf_state *state,
				const unsigned char *base)
{
	char name[EBT_FUNCTION_MAXNAMELEN];
	struct xt_match *match;
	struct xt_target *wt;
	void *dst = NULL;
	int off, pad = 0, ret = 0;
	unsigned int size_kern, entry_offset, match_size = mwt->match_size;

	strlcpy(name, mwt->u.name, sizeof(name));

	if (state->buf_kern_start)
		dst = state->buf_kern_start + state->buf_kern_offset;

	entry_offset = (unsigned char *) mwt - base;
	switch (compat_mwt) {
	case EBT_COMPAT_MATCH:
		/* look up the extension, autoloading its module if needed */
		match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
						name, 0), "ebt_%s", name);
		if (match == NULL)
			return -ENOENT;
		if (IS_ERR(match))
			return PTR_ERR(match);

		off = ebt_compat_match_offset(match, match_size);
		if (dst) {
			if (match->compat_from_user)
				match->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = match->matchsize;
		if (unlikely(size_kern == -1))
			size_kern = match_size;	/* -1: size not fixed */
		module_put(match->me);
		break;
	case EBT_COMPAT_WATCHER: /* fallthrough */
	case EBT_COMPAT_TARGET:
		/* watchers and targets both use struct xt_target */
		wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
						name, 0), "ebt_%s", name);
		if (wt == NULL)
			return -ENOENT;
		if (IS_ERR(wt))
			return PTR_ERR(wt);
		off = xt_compat_target_offset(wt);

		if (dst) {
			if (wt->compat_from_user)
				wt->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = wt->targetsize;
		module_put(wt->me);
		break;
	}

	if (!dst) {
		/* sizing pass: record delta for later jump offset fixups */
		ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset,
					off + ebt_compat_entry_padsize());
		if (ret < 0)
			return ret;
	}

	state->buf_kern_offset += match_size + off;
	state->buf_user_offset += match_size;
	pad = XT_ALIGN(size_kern) - size_kern;

	/* zero the alignment tail so no kernel memory leaks to userspace */
	if (pad > 0 && dst) {
		BUG_ON(state->buf_kern_len <= pad);
		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
		memset(dst + size_kern, 0, pad);
	}
	return off + match_size;
}
1957
/*
 * return size of all matches, watchers or target, including necessary
 * alignment and padding.
 *
 * Walks the compat mwt list starting at 'match32' spanning 'size_left'
 * bytes, feeding each element through compat_mtw_from_user(); the
 * returned value is the total kernel-side growth, or a negative errno.
 */
static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
			unsigned int size_left, enum compat_mwt type,
			struct ebt_entries_buf_state *state, const void *base)
{
	int growth = 0;
	char *buf;

	if (size_left == 0)
		return 0;

	buf = (char *) match32;

	while (size_left >= sizeof(*match32)) {
		struct ebt_entry_match *match_kern;
		int ret;

		/* on the copy pass, remember where this header lands in the
		 * kernel buffer so its match_size can be fixed up below */
		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
		if (match_kern) {
			char *tmp;
			tmp = state->buf_kern_start + state->buf_kern_offset;
			match_kern = (struct ebt_entry_match *) tmp;
		}
		ret = ebt_buf_add(state, buf, sizeof(*match32));
		if (ret < 0)
			return ret;
		size_left -= sizeof(*match32);

		/* add padding before match->data (if any) */
		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
		if (ret < 0)
			return ret;

		if (match32->match_size > size_left)
			return -EINVAL;

		size_left -= match32->match_size;

		ret = compat_mtw_from_user(match32, type, state, base);
		if (ret < 0)
			return ret;

		/* ret >= compat size consumed; excess is kernel growth */
		BUG_ON(ret < match32->match_size);
		growth += ret - match32->match_size;
		growth += ebt_compat_entry_padsize();

		buf += sizeof(*match32);
		buf += match32->match_size;

		if (match_kern)
			match_kern->match_size = ret;

		/* an entry has exactly one target; leftover bytes after it
		 * would mean malformed input */
		WARN_ON(type == EBT_COMPAT_TARGET && size_left);
		match32 = (struct compat_ebt_entry_mwt *) buf;
	}

	return growth;
}
2019
/* Iterate the compat-layout watchers of entry 'e' (the byte range between
 * watchers_offset and target_offset), invoking fn(watcher, args...) on each.
 * Evaluates to fn's first non-zero return, to -EINVAL when the list does not
 * end exactly at target_offset, or to 0 on success.
 */
#define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...)          \
({                                                          \
	unsigned int __i;                                   \
	int __ret = 0;                                      \
	struct compat_ebt_entry_mwt *__watcher;             \
	                                                    \
	for (__i = e->watchers_offset;                      \
	     __i < (e)->target_offset;                      \
	     __i += __watcher->watcher_size +               \
	     sizeof(struct compat_ebt_entry_mwt)) {         \
		__watcher = (void *)(e) + __i;              \
		__ret = fn(__watcher , ## args);            \
		if (__ret != 0)                             \
			break;                              \
	}                                                   \
	if (__ret == 0) {                                   \
		if (__i != (e)->target_offset)              \
			__ret = -EINVAL;                    \
	}                                                   \
	__ret;                                              \
})
2041
/* Iterate the compat-layout matches of entry 'e' (the byte range between the
 * fixed ebt_entry header and watchers_offset), invoking fn(match, args...) on
 * each.  Evaluates to fn's first non-zero return, to -EINVAL when the list
 * does not end exactly at watchers_offset, or to 0 on success.
 */
#define EBT_COMPAT_MATCH_ITERATE(e, fn, args...)            \
({                                                          \
	unsigned int __i;                                   \
	int __ret = 0;                                      \
	struct compat_ebt_entry_mwt *__match;               \
	                                                    \
	for (__i = sizeof(struct ebt_entry);                \
	     __i < (e)->watchers_offset;                    \
	     __i += __match->match_size +                   \
	     sizeof(struct compat_ebt_entry_mwt)) {         \
		__match = (void *)(e) + __i;                \
		__ret = fn(__match , ## args);              \
		if (__ret != 0)                             \
			break;                              \
	}                                                   \
	if (__ret == 0) {                                   \
		if (__i != (e)->watchers_offset)            \
			__ret = -EINVAL;                    \
	}                                                   \
	__ret;                                              \
})
2063
/* called for all ebt_entry structures. */
/*
 * Translate (or, on the sizing pass, measure) one entry from compat to
 * native layout: copy the fixed header, then matches, watchers and the
 * target, patching the copied offset fields to the grown sizes.
 * Decrements *total by the compat bytes consumed.
 */
static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
			  unsigned int *total,
			  struct ebt_entries_buf_state *state)
{
	unsigned int i, j, startoff, new_offset = 0;
	/* stores match/watchers/targets & offset of next struct ebt_entry: */
	unsigned int offsets[4];
	unsigned int *offsets_update = NULL;
	int ret;
	char *buf_start;

	if (*total < sizeof(struct ebt_entries))
		return -EINVAL;

	if (!entry->bitmask) {
		/* chain header: identical layout in compat and native */
		*total -= sizeof(struct ebt_entries);
		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
	}
	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
		return -EINVAL;

	startoff = state->buf_user_offset;
	/* pull in most part of ebt_entry, it does not need to be changed. */
	ret = ebt_buf_add(state, entry,
			offsetof(struct ebt_entry, watchers_offset));
	if (ret < 0)
		return ret;

	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
	memcpy(&offsets[1], &entry->watchers_offset,
			sizeof(offsets) - sizeof(offsets[0]));

	if (state->buf_kern_start) {
		/* the three offset fields we are about to copy land here;
		 * they are patched in the loop below */
		buf_start = state->buf_kern_start + state->buf_kern_offset;
		offsets_update = (unsigned int *) buf_start;
	}
	ret = ebt_buf_add(state, &offsets[1],
			sizeof(offsets) - sizeof(offsets[0]));
	if (ret < 0)
		return ret;
	buf_start = (char *) entry;
	/*
	 * 0: matches offset, always follows ebt_entry.
	 * 1: watchers offset, from ebt_entry structure
	 * 2: target offset, from ebt_entry structure
	 * 3: next ebt_entry offset, from ebt_entry structure
	 *
	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
	 */
	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
		struct compat_ebt_entry_mwt *match32;
		unsigned int size;
		char *buf = buf_start;

		buf = buf_start + offsets[i];
		if (offsets[i] > offsets[j])
			return -EINVAL;

		/* 'i' doubles as the enum compat_mwt for this segment:
		 * 0 = matches, 1 = watchers, 2 = target */
		match32 = (struct compat_ebt_entry_mwt *) buf;
		size = offsets[j] - offsets[i];
		ret = ebt_size_mwt(match32, size, i, state, base);
		if (ret < 0)
			return ret;
		new_offset += ret;
		if (offsets_update && new_offset) {
			pr_debug("ebtables: change offset %d to %d\n",
				offsets_update[i], offsets[j] + new_offset);
			offsets_update[i] = offsets[j] + new_offset;
		}
	}

	startoff = state->buf_user_offset - startoff;

	/* startoff now holds the compat bytes this entry consumed */
	BUG_ON(*total < startoff);
	*total -= startoff;
	return 0;
}
2142
2143/*
2144 * repl->entries_size is the size of the ebt_entry blob in userspace.
2145 * It might need more memory when copied to a 64 bit kernel in case
2146 * userspace is 32-bit. So, first task: find out how much memory is needed.
2147 *
2148 * Called before validation is performed.
2149 */
2150static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2151 struct ebt_entries_buf_state *state)
2152{
2153 unsigned int size_remaining = size_user;
2154 int ret;
2155
2156 ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2157 &size_remaining, state);
2158 if (ret < 0)
2159 return ret;
2160
2161 WARN_ON(size_remaining);
2162 return state->buf_kern_offset;
2163}
2164
2165
2166static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
2167 void __user *user, unsigned int len)
2168{
2169 struct compat_ebt_replace tmp;
2170 int i;
2171
2172 if (len < sizeof(tmp))
2173 return -EINVAL;
2174
2175 if (copy_from_user(&tmp, user, sizeof(tmp)))
2176 return -EFAULT;
2177
2178 if (len != sizeof(tmp) + tmp.entries_size)
2179 return -EINVAL;
2180
2181 if (tmp.entries_size == 0)
2182 return -EINVAL;
2183
2184 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
2185 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
2186 return -ENOMEM;
2187 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
2188 return -ENOMEM;
2189
2190 memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
2191
2192 /* starting with hook_entry, 32 vs. 64 bit structures are different */
2193 for (i = 0; i < NF_BR_NUMHOOKS; i++)
2194 repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
2195
2196 repl->num_counters = tmp.num_counters;
2197 repl->counters = compat_ptr(tmp.counters);
2198 repl->entries = compat_ptr(tmp.entries);
2199 return 0;
2200}
2201
/*
 * Compat EBT_SO_SET_ENTRIES handler: widen a 32-bit ebt_replace and its
 * entry blob into native layout (two passes: size, then copy), then hand
 * the result to do_replace_finish().
 */
static int compat_do_replace(struct net *net, void __user *user,
			     unsigned int len)
{
	int ret, i, countersize, size64;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;
	struct ebt_entries_buf_state state;
	void *entries_tmp;

	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
	if (ret) {
		/* try real handler in case userland supplied needed padding */
		if (ret == -EINVAL && do_replace(net, user, len) == 0)
			ret = 0;
		return ret;
	}

	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	memset(&state, 0, sizeof(state));

	/* pull the compat-layout blob into a temporary kernel buffer */
	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		ret = -EFAULT;
		goto free_entries;
	}

	entries_tmp = newinfo->entries;

	/* serialize against other compat offset-table users */
	xt_compat_lock(NFPROTO_BRIDGE);

	/* pass 1: buf_kern_start == NULL, only computes the native size */
	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	if (ret < 0)
		goto out_unlock;

	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));

	size64 = ret;
	newinfo->entries = vmalloc(size64);
	if (!newinfo->entries) {
		vfree(entries_tmp);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* pass 2: same input, now translating into the real buffer */
	memset(&state, 0, sizeof(state));
	state.buf_kern_start = newinfo->entries;
	state.buf_kern_len = size64;

	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	BUG_ON(ret < 0);	/* parses same data again */

	vfree(entries_tmp);
	tmp.entries_size = size64;

	/* shift each hook entry pointer by the accumulated size deltas */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		char __user *usrptr;
		if (tmp.hook_entry[i]) {
			unsigned int delta;
			usrptr = (char __user *) tmp.hook_entry[i];
			delta = usrptr - tmp.entries;
			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
		}
	}

	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);

	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;	/* success: newinfo now owned by the table */
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	goto free_entries;
}
2297
2298static int compat_update_counters(struct net *net, void __user *user,
2299 unsigned int len)
2300{
2301 struct compat_ebt_replace hlp;
2302
2303 if (copy_from_user(&hlp, user, sizeof(hlp)))
2304 return -EFAULT;
2305
2306 /* try real handler in case userland supplied needed padding */
2307 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2308 return update_counters(net, user, len);
2309
2310 return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2311 hlp.num_counters, user, len);
2312}
2313
2314static int compat_do_ebt_set_ctl(struct sock *sk,
2315 int cmd, void __user *user, unsigned int len)
2316{
2317 int ret;
2318
2319 if (!capable(CAP_NET_ADMIN))
2320 return -EPERM;
2321
2322 switch (cmd) {
2323 case EBT_SO_SET_ENTRIES:
2324 ret = compat_do_replace(sock_net(sk), user, len);
2325 break;
2326 case EBT_SO_SET_COUNTERS:
2327 ret = compat_update_counters(sock_net(sk), user, len);
2328 break;
2329 default:
2330 ret = -EINVAL;
2331 }
2332 return ret;
2333}
2334
/*
 * Compat getsockopt entry point: serve EBT_SO_GET_* to 32-bit processes,
 * falling back to the native handler wherever the layout userland sent
 * already matches the native one; requires CAP_NET_ADMIN.
 */
static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
				 void __user *user, int *len)
{
	int ret;
	struct compat_ebt_replace tmp;
	struct ebt_table *t;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* try real handler in case userland supplied needed padding */
	if ((cmd == EBT_SO_GET_INFO ||
	    cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
		return do_ebt_get_ctl(sk, cmd, user, len);

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* on success the table is returned with ebt_mutex held */
	t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	xt_compat_lock(NFPROTO_BRIDGE);
	switch (cmd) {
	case EBT_SO_GET_INFO:
		tmp.nentries = t->private->nentries;
		/* recompute entries_size for the compat layout */
		ret = compat_table_info(t->private, &tmp);
		if (ret)
			goto out;
		tmp.valid_hooks = t->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_INIT_INFO:
		/* report the registration-time template instead */
		tmp.nentries = t->table->nentries;
		tmp.entries_size = t->table->entries_size;
		tmp.valid_hooks = t->table->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		/*
		 * try real handler first in case of userland-side padding.
		 * in case we are dealing with an 'ordinary' 32 bit binary
		 * without 64bit compatibility padding, this will fail right
		 * after copy_from_user when the *len argument is validated.
		 *
		 * the compat_ variant needs to do one pass over the kernel
		 * data set to adjust for size differences before it the check.
		 */
		if (copy_everything_to_user(t, user, len, cmd) == 0)
			ret = 0;
		else
			ret = compat_copy_everything_to_user(t, user, len, cmd);
		break;
	default:
		ret = -EINVAL;
	}
 out:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	mutex_unlock(&ebt_mutex);
	return ret;
}
2408#endif
2409
1475static struct nf_sockopt_ops ebt_sockopts = 2410static struct nf_sockopt_ops ebt_sockopts =
1476{ 2411{
1477 .pf = PF_INET, 2412 .pf = PF_INET,
1478 .set_optmin = EBT_BASE_CTL, 2413 .set_optmin = EBT_BASE_CTL,
1479 .set_optmax = EBT_SO_SET_MAX + 1, 2414 .set_optmax = EBT_SO_SET_MAX + 1,
1480 .set = do_ebt_set_ctl, 2415 .set = do_ebt_set_ctl,
2416#ifdef CONFIG_COMPAT
2417 .compat_set = compat_do_ebt_set_ctl,
2418#endif
1481 .get_optmin = EBT_BASE_CTL, 2419 .get_optmin = EBT_BASE_CTL,
1482 .get_optmax = EBT_SO_GET_MAX + 1, 2420 .get_optmax = EBT_SO_GET_MAX + 1,
1483 .get = do_ebt_get_ctl, 2421 .get = do_ebt_get_ctl,
2422#ifdef CONFIG_COMPAT
2423 .compat_get = compat_do_ebt_get_ctl,
2424#endif
1484 .owner = THIS_MODULE, 2425 .owner = THIS_MODULE,
1485}; 2426};
1486 2427
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 51adc4c2b860..702be5a2c956 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -77,8 +77,8 @@ static int stats_timer __read_mostly = 1;
77module_param(stats_timer, int, S_IRUGO); 77module_param(stats_timer, int, S_IRUGO);
78MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)"); 78MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
79 79
80HLIST_HEAD(can_rx_dev_list); 80/* receive filters subscribed for 'all' CAN devices */
81static struct dev_rcv_lists can_rx_alldev_list; 81struct dev_rcv_lists can_rx_alldev_list;
82static DEFINE_SPINLOCK(can_rcvlists_lock); 82static DEFINE_SPINLOCK(can_rcvlists_lock);
83 83
84static struct kmem_cache *rcv_cache __read_mostly; 84static struct kmem_cache *rcv_cache __read_mostly;
@@ -292,28 +292,10 @@ EXPORT_SYMBOL(can_send);
292 292
293static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev) 293static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
294{ 294{
295 struct dev_rcv_lists *d = NULL; 295 if (!dev)
296 struct hlist_node *n; 296 return &can_rx_alldev_list;
297 297 else
298 /* 298 return (struct dev_rcv_lists *)dev->ml_priv;
299 * find receive list for this device
300 *
301 * The hlist_for_each_entry*() macros curse through the list
302 * using the pointer variable n and set d to the containing
303 * struct in each list iteration. Therefore, after list
304 * iteration, d is unmodified when the list is empty, and it
305 * points to last list element, when the list is non-empty
306 * but no match in the loop body is found. I.e. d is *not*
307 * NULL when no match is found. We can, however, use the
308 * cursor variable n to decide if a match was found.
309 */
310
311 hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
312 if (d->dev == dev)
313 break;
314 }
315
316 return n ? d : NULL;
317} 299}
318 300
319/** 301/**
@@ -433,6 +415,9 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
433 415
434 /* insert new receiver (dev,canid,mask) -> (func,data) */ 416 /* insert new receiver (dev,canid,mask) -> (func,data) */
435 417
418 if (dev && dev->type != ARPHRD_CAN)
419 return -ENODEV;
420
436 r = kmem_cache_alloc(rcv_cache, GFP_KERNEL); 421 r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
437 if (!r) 422 if (!r)
438 return -ENOMEM; 423 return -ENOMEM;
@@ -468,16 +453,6 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
468EXPORT_SYMBOL(can_rx_register); 453EXPORT_SYMBOL(can_rx_register);
469 454
470/* 455/*
471 * can_rx_delete_device - rcu callback for dev_rcv_lists structure removal
472 */
473static void can_rx_delete_device(struct rcu_head *rp)
474{
475 struct dev_rcv_lists *d = container_of(rp, struct dev_rcv_lists, rcu);
476
477 kfree(d);
478}
479
480/*
481 * can_rx_delete_receiver - rcu callback for single receiver entry removal 456 * can_rx_delete_receiver - rcu callback for single receiver entry removal
482 */ 457 */
483static void can_rx_delete_receiver(struct rcu_head *rp) 458static void can_rx_delete_receiver(struct rcu_head *rp)
@@ -506,6 +481,9 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
506 struct hlist_node *next; 481 struct hlist_node *next;
507 struct dev_rcv_lists *d; 482 struct dev_rcv_lists *d;
508 483
484 if (dev && dev->type != ARPHRD_CAN)
485 return;
486
509 spin_lock(&can_rcvlists_lock); 487 spin_lock(&can_rcvlists_lock);
510 488
511 d = find_dev_rcv_lists(dev); 489 d = find_dev_rcv_lists(dev);
@@ -541,7 +519,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
541 "dev %s, id %03X, mask %03X\n", 519 "dev %s, id %03X, mask %03X\n",
542 DNAME(dev), can_id, mask); 520 DNAME(dev), can_id, mask);
543 r = NULL; 521 r = NULL;
544 d = NULL;
545 goto out; 522 goto out;
546 } 523 }
547 524
@@ -552,10 +529,10 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
552 can_pstats.rcv_entries--; 529 can_pstats.rcv_entries--;
553 530
554 /* remove device structure requested by NETDEV_UNREGISTER */ 531 /* remove device structure requested by NETDEV_UNREGISTER */
555 if (d->remove_on_zero_entries && !d->entries) 532 if (d->remove_on_zero_entries && !d->entries) {
556 hlist_del_rcu(&d->list); 533 kfree(d);
557 else 534 dev->ml_priv = NULL;
558 d = NULL; 535 }
559 536
560 out: 537 out:
561 spin_unlock(&can_rcvlists_lock); 538 spin_unlock(&can_rcvlists_lock);
@@ -563,10 +540,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
563 /* schedule the receiver item for deletion */ 540 /* schedule the receiver item for deletion */
564 if (r) 541 if (r)
565 call_rcu(&r->rcu, can_rx_delete_receiver); 542 call_rcu(&r->rcu, can_rx_delete_receiver);
566
567 /* schedule the device structure for deletion */
568 if (d)
569 call_rcu(&d->rcu, can_rx_delete_device);
570} 543}
571EXPORT_SYMBOL(can_rx_unregister); 544EXPORT_SYMBOL(can_rx_unregister);
572 545
@@ -780,48 +753,35 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
780 753
781 case NETDEV_REGISTER: 754 case NETDEV_REGISTER:
782 755
783 /* 756 /* create new dev_rcv_lists for this device */
784 * create new dev_rcv_lists for this device
785 *
786 * N.B. zeroing the struct is the correct initialization
787 * for the embedded hlist_head structs.
788 * Another list type, e.g. list_head, would require
789 * explicit initialization.
790 */
791
792 d = kzalloc(sizeof(*d), GFP_KERNEL); 757 d = kzalloc(sizeof(*d), GFP_KERNEL);
793 if (!d) { 758 if (!d) {
794 printk(KERN_ERR 759 printk(KERN_ERR
795 "can: allocation of receive list failed\n"); 760 "can: allocation of receive list failed\n");
796 return NOTIFY_DONE; 761 return NOTIFY_DONE;
797 } 762 }
798 d->dev = dev; 763 BUG_ON(dev->ml_priv);
799 764 dev->ml_priv = d;
800 spin_lock(&can_rcvlists_lock);
801 hlist_add_head_rcu(&d->list, &can_rx_dev_list);
802 spin_unlock(&can_rcvlists_lock);
803 765
804 break; 766 break;
805 767
806 case NETDEV_UNREGISTER: 768 case NETDEV_UNREGISTER:
807 spin_lock(&can_rcvlists_lock); 769 spin_lock(&can_rcvlists_lock);
808 770
809 d = find_dev_rcv_lists(dev); 771 d = dev->ml_priv;
810 if (d) { 772 if (d) {
811 if (d->entries) { 773 if (d->entries)
812 d->remove_on_zero_entries = 1; 774 d->remove_on_zero_entries = 1;
813 d = NULL; 775 else {
814 } else 776 kfree(d);
815 hlist_del_rcu(&d->list); 777 dev->ml_priv = NULL;
778 }
816 } else 779 } else
817 printk(KERN_ERR "can: notifier: receive list not " 780 printk(KERN_ERR "can: notifier: receive list not "
818 "found for dev %s\n", dev->name); 781 "found for dev %s\n", dev->name);
819 782
820 spin_unlock(&can_rcvlists_lock); 783 spin_unlock(&can_rcvlists_lock);
821 784
822 if (d)
823 call_rcu(&d->rcu, can_rx_delete_device);
824
825 break; 785 break;
826 } 786 }
827 787
@@ -853,21 +813,13 @@ static __init int can_init(void)
853{ 813{
854 printk(banner); 814 printk(banner);
855 815
816 memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list));
817
856 rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver), 818 rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
857 0, 0, NULL); 819 0, 0, NULL);
858 if (!rcv_cache) 820 if (!rcv_cache)
859 return -ENOMEM; 821 return -ENOMEM;
860 822
861 /*
862 * Insert can_rx_alldev_list for reception on all devices.
863 * This struct is zero initialized which is correct for the
864 * embedded hlist heads, the dev pointer, and the entries counter.
865 */
866
867 spin_lock(&can_rcvlists_lock);
868 hlist_add_head_rcu(&can_rx_alldev_list.list, &can_rx_dev_list);
869 spin_unlock(&can_rcvlists_lock);
870
871 if (stats_timer) { 823 if (stats_timer) {
872 /* the statistics are updated every second (timer triggered) */ 824 /* the statistics are updated every second (timer triggered) */
873 setup_timer(&can_stattimer, can_stat_update, 0); 825 setup_timer(&can_stattimer, can_stat_update, 0);
@@ -887,8 +839,7 @@ static __init int can_init(void)
887 839
888static __exit void can_exit(void) 840static __exit void can_exit(void)
889{ 841{
890 struct dev_rcv_lists *d; 842 struct net_device *dev;
891 struct hlist_node *n, *next;
892 843
893 if (stats_timer) 844 if (stats_timer)
894 del_timer(&can_stattimer); 845 del_timer(&can_stattimer);
@@ -900,14 +851,19 @@ static __exit void can_exit(void)
900 unregister_netdevice_notifier(&can_netdev_notifier); 851 unregister_netdevice_notifier(&can_netdev_notifier);
901 sock_unregister(PF_CAN); 852 sock_unregister(PF_CAN);
902 853
903 /* remove can_rx_dev_list */ 854 /* remove created dev_rcv_lists from still registered CAN devices */
904 spin_lock(&can_rcvlists_lock); 855 rcu_read_lock();
905 hlist_del(&can_rx_alldev_list.list); 856 for_each_netdev_rcu(&init_net, dev) {
906 hlist_for_each_entry_safe(d, n, next, &can_rx_dev_list, list) { 857 if (dev->type == ARPHRD_CAN && dev->ml_priv){
907 hlist_del(&d->list); 858
908 kfree(d); 859 struct dev_rcv_lists *d = dev->ml_priv;
860
861 BUG_ON(d->entries);
862 kfree(d);
863 dev->ml_priv = NULL;
864 }
909 } 865 }
910 spin_unlock(&can_rcvlists_lock); 866 rcu_read_unlock();
911 867
912 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 868 rcu_barrier(); /* Wait for completion of call_rcu()'s */
913 869
diff --git a/net/can/af_can.h b/net/can/af_can.h
index 18f91e37cc30..34253b84e30f 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -63,10 +63,8 @@ struct receiver {
63 63
64enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_EFF, RX_MAX }; 64enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_EFF, RX_MAX };
65 65
66/* per device receive filters linked at dev->ml_priv */
66struct dev_rcv_lists { 67struct dev_rcv_lists {
67 struct hlist_node list;
68 struct rcu_head rcu;
69 struct net_device *dev;
70 struct hlist_head rx[RX_MAX]; 68 struct hlist_head rx[RX_MAX];
71 struct hlist_head rx_sff[0x800]; 69 struct hlist_head rx_sff[0x800];
72 int remove_on_zero_entries; 70 int remove_on_zero_entries;
diff --git a/net/can/proc.c b/net/can/proc.c
index 9b9ad29be567..f4265cc9c3fb 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -45,6 +45,7 @@
45#include <linux/proc_fs.h> 45#include <linux/proc_fs.h>
46#include <linux/list.h> 46#include <linux/list.h>
47#include <linux/rcupdate.h> 47#include <linux/rcupdate.h>
48#include <linux/if_arp.h>
48#include <linux/can/core.h> 49#include <linux/can/core.h>
49 50
50#include "af_can.h" 51#include "af_can.h"
@@ -84,6 +85,9 @@ static const char rx_list_name[][8] = {
84 [RX_EFF] = "rx_eff", 85 [RX_EFF] = "rx_eff",
85}; 86};
86 87
88/* receive filters subscribed for 'all' CAN devices */
89extern struct dev_rcv_lists can_rx_alldev_list;
90
87/* 91/*
88 * af_can statistics stuff 92 * af_can statistics stuff
89 */ 93 */
@@ -190,10 +194,6 @@ void can_stat_update(unsigned long data)
190 194
191/* 195/*
192 * proc read functions 196 * proc read functions
193 *
194 * From known use-cases we expect about 10 entries in a receive list to be
195 * printed in the proc_fs. So PAGE_SIZE is definitely enough space here.
196 *
197 */ 197 */
198 198
199static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list, 199static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
@@ -202,7 +202,6 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
202 struct receiver *r; 202 struct receiver *r;
203 struct hlist_node *n; 203 struct hlist_node *n;
204 204
205 rcu_read_lock();
206 hlist_for_each_entry_rcu(r, n, rx_list, list) { 205 hlist_for_each_entry_rcu(r, n, rx_list, list) {
207 char *fmt = (r->can_id & CAN_EFF_FLAG)? 206 char *fmt = (r->can_id & CAN_EFF_FLAG)?
208 " %-5s %08X %08x %08x %08x %8ld %s\n" : 207 " %-5s %08X %08x %08x %08x %8ld %s\n" :
@@ -212,7 +211,6 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
212 (unsigned long)r->func, (unsigned long)r->data, 211 (unsigned long)r->func, (unsigned long)r->data,
213 r->matches, r->ident); 212 r->matches, r->ident);
214 } 213 }
215 rcu_read_unlock();
216} 214}
217 215
218static void can_print_recv_banner(struct seq_file *m) 216static void can_print_recv_banner(struct seq_file *m)
@@ -346,24 +344,39 @@ static const struct file_operations can_version_proc_fops = {
346 .release = single_release, 344 .release = single_release,
347}; 345};
348 346
347static inline void can_rcvlist_proc_show_one(struct seq_file *m, int idx,
348 struct net_device *dev,
349 struct dev_rcv_lists *d)
350{
351 if (!hlist_empty(&d->rx[idx])) {
352 can_print_recv_banner(m);
353 can_print_rcvlist(m, &d->rx[idx], dev);
354 } else
355 seq_printf(m, " (%s: no entry)\n", DNAME(dev));
356
357}
358
349static int can_rcvlist_proc_show(struct seq_file *m, void *v) 359static int can_rcvlist_proc_show(struct seq_file *m, void *v)
350{ 360{
351 /* double cast to prevent GCC warning */ 361 /* double cast to prevent GCC warning */
352 int idx = (int)(long)m->private; 362 int idx = (int)(long)m->private;
363 struct net_device *dev;
353 struct dev_rcv_lists *d; 364 struct dev_rcv_lists *d;
354 struct hlist_node *n;
355 365
356 seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]); 366 seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]);
357 367
358 rcu_read_lock(); 368 rcu_read_lock();
359 hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
360 369
361 if (!hlist_empty(&d->rx[idx])) { 370 /* receive list for 'all' CAN devices (dev == NULL) */
362 can_print_recv_banner(m); 371 d = &can_rx_alldev_list;
363 can_print_rcvlist(m, &d->rx[idx], d->dev); 372 can_rcvlist_proc_show_one(m, idx, NULL, d);
364 } else 373
365 seq_printf(m, " (%s: no entry)\n", DNAME(d->dev)); 374 /* receive list for registered CAN devices */
375 for_each_netdev_rcu(&init_net, dev) {
376 if (dev->type == ARPHRD_CAN && dev->ml_priv)
377 can_rcvlist_proc_show_one(m, idx, dev, dev->ml_priv);
366 } 378 }
379
367 rcu_read_unlock(); 380 rcu_read_unlock();
368 381
369 seq_putc(m, '\n'); 382 seq_putc(m, '\n');
@@ -383,34 +396,50 @@ static const struct file_operations can_rcvlist_proc_fops = {
383 .release = single_release, 396 .release = single_release,
384}; 397};
385 398
399static inline void can_rcvlist_sff_proc_show_one(struct seq_file *m,
400 struct net_device *dev,
401 struct dev_rcv_lists *d)
402{
403 int i;
404 int all_empty = 1;
405
406 /* check wether at least one list is non-empty */
407 for (i = 0; i < 0x800; i++)
408 if (!hlist_empty(&d->rx_sff[i])) {
409 all_empty = 0;
410 break;
411 }
412
413 if (!all_empty) {
414 can_print_recv_banner(m);
415 for (i = 0; i < 0x800; i++) {
416 if (!hlist_empty(&d->rx_sff[i]))
417 can_print_rcvlist(m, &d->rx_sff[i], dev);
418 }
419 } else
420 seq_printf(m, " (%s: no entry)\n", DNAME(dev));
421}
422
386static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v) 423static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
387{ 424{
425 struct net_device *dev;
388 struct dev_rcv_lists *d; 426 struct dev_rcv_lists *d;
389 struct hlist_node *n;
390 427
391 /* RX_SFF */ 428 /* RX_SFF */
392 seq_puts(m, "\nreceive list 'rx_sff':\n"); 429 seq_puts(m, "\nreceive list 'rx_sff':\n");
393 430
394 rcu_read_lock(); 431 rcu_read_lock();
395 hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) { 432
396 int i, all_empty = 1; 433 /* sff receive list for 'all' CAN devices (dev == NULL) */
397 /* check wether at least one list is non-empty */ 434 d = &can_rx_alldev_list;
398 for (i = 0; i < 0x800; i++) 435 can_rcvlist_sff_proc_show_one(m, NULL, d);
399 if (!hlist_empty(&d->rx_sff[i])) { 436
400 all_empty = 0; 437 /* sff receive list for registered CAN devices */
401 break; 438 for_each_netdev_rcu(&init_net, dev) {
402 } 439 if (dev->type == ARPHRD_CAN && dev->ml_priv)
403 440 can_rcvlist_sff_proc_show_one(m, dev, dev->ml_priv);
404 if (!all_empty) {
405 can_print_recv_banner(m);
406 for (i = 0; i < 0x800; i++) {
407 if (!hlist_empty(&d->rx_sff[i]))
408 can_print_rcvlist(m, &d->rx_sff[i],
409 d->dev);
410 }
411 } else
412 seq_printf(m, " (%s: no entry)\n", DNAME(d->dev));
413 } 441 }
442
414 rcu_read_unlock(); 443 rcu_read_unlock();
415 444
416 seq_putc(m, '\n'); 445 seq_putc(m, '\n');
diff --git a/net/core/dev.c b/net/core/dev.c
index 6fe7d739e59b..59d4394d2ce8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1113,19 +1113,7 @@ void dev_load(struct net *net, const char *name)
1113} 1113}
1114EXPORT_SYMBOL(dev_load); 1114EXPORT_SYMBOL(dev_load);
1115 1115
1116/** 1116static int __dev_open(struct net_device *dev)
1117 * dev_open - prepare an interface for use.
1118 * @dev: device to open
1119 *
1120 * Takes a device from down to up state. The device's private open
1121 * function is invoked and then the multicast lists are loaded. Finally
1122 * the device is moved into the up state and a %NETDEV_UP message is
1123 * sent to the netdev notifier chain.
1124 *
1125 * Calling this function on an active interface is a nop. On a failure
1126 * a negative errno code is returned.
1127 */
1128int dev_open(struct net_device *dev)
1129{ 1117{
1130 const struct net_device_ops *ops = dev->netdev_ops; 1118 const struct net_device_ops *ops = dev->netdev_ops;
1131 int ret; 1119 int ret;
@@ -1133,13 +1121,6 @@ int dev_open(struct net_device *dev)
1133 ASSERT_RTNL(); 1121 ASSERT_RTNL();
1134 1122
1135 /* 1123 /*
1136 * Is it already up?
1137 */
1138
1139 if (dev->flags & IFF_UP)
1140 return 0;
1141
1142 /*
1143 * Is it even present? 1124 * Is it even present?
1144 */ 1125 */
1145 if (!netif_device_present(dev)) 1126 if (!netif_device_present(dev))
@@ -1187,36 +1168,57 @@ int dev_open(struct net_device *dev)
1187 * Wakeup transmit queue engine 1168 * Wakeup transmit queue engine
1188 */ 1169 */
1189 dev_activate(dev); 1170 dev_activate(dev);
1190
1191 /*
1192 * ... and announce new interface.
1193 */
1194 call_netdevice_notifiers(NETDEV_UP, dev);
1195 } 1171 }
1196 1172
1197 return ret; 1173 return ret;
1198} 1174}
1199EXPORT_SYMBOL(dev_open);
1200 1175
1201/** 1176/**
1202 * dev_close - shutdown an interface. 1177 * dev_open - prepare an interface for use.
1203 * @dev: device to shutdown 1178 * @dev: device to open
1204 * 1179 *
1205 * This function moves an active device into down state. A 1180 * Takes a device from down to up state. The device's private open
1206 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device 1181 * function is invoked and then the multicast lists are loaded. Finally
1207 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier 1182 * the device is moved into the up state and a %NETDEV_UP message is
1208 * chain. 1183 * sent to the netdev notifier chain.
1184 *
1185 * Calling this function on an active interface is a nop. On a failure
1186 * a negative errno code is returned.
1209 */ 1187 */
1210int dev_close(struct net_device *dev) 1188int dev_open(struct net_device *dev)
1189{
1190 int ret;
1191
1192 /*
1193 * Is it already up?
1194 */
1195 if (dev->flags & IFF_UP)
1196 return 0;
1197
1198 /*
1199 * Open device
1200 */
1201 ret = __dev_open(dev);
1202 if (ret < 0)
1203 return ret;
1204
1205 /*
1206 * ... and announce new interface.
1207 */
1208 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1209 call_netdevice_notifiers(NETDEV_UP, dev);
1210
1211 return ret;
1212}
1213EXPORT_SYMBOL(dev_open);
1214
1215static int __dev_close(struct net_device *dev)
1211{ 1216{
1212 const struct net_device_ops *ops = dev->netdev_ops; 1217 const struct net_device_ops *ops = dev->netdev_ops;
1213 ASSERT_RTNL();
1214 1218
1219 ASSERT_RTNL();
1215 might_sleep(); 1220 might_sleep();
1216 1221
1217 if (!(dev->flags & IFF_UP))
1218 return 0;
1219
1220 /* 1222 /*
1221 * Tell people we are going down, so that they can 1223 * Tell people we are going down, so that they can
1222 * prepare to death, when device is still operating. 1224 * prepare to death, when device is still operating.
@@ -1252,14 +1254,34 @@ int dev_close(struct net_device *dev)
1252 dev->flags &= ~IFF_UP; 1254 dev->flags &= ~IFF_UP;
1253 1255
1254 /* 1256 /*
1255 * Tell people we are down 1257 * Shutdown NET_DMA
1256 */ 1258 */
1257 call_netdevice_notifiers(NETDEV_DOWN, dev); 1259 net_dmaengine_put();
1260
1261 return 0;
1262}
1263
1264/**
1265 * dev_close - shutdown an interface.
1266 * @dev: device to shutdown
1267 *
1268 * This function moves an active device into down state. A
1269 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1270 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1271 * chain.
1272 */
1273int dev_close(struct net_device *dev)
1274{
1275 if (!(dev->flags & IFF_UP))
1276 return 0;
1277
1278 __dev_close(dev);
1258 1279
1259 /* 1280 /*
1260 * Shutdown NET_DMA 1281 * Tell people we are down
1261 */ 1282 */
1262 net_dmaengine_put(); 1283 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1284 call_netdevice_notifiers(NETDEV_DOWN, dev);
1263 1285
1264 return 0; 1286 return 0;
1265} 1287}
@@ -1448,13 +1470,10 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1448 if (skb->len > (dev->mtu + dev->hard_header_len)) 1470 if (skb->len > (dev->mtu + dev->hard_header_len))
1449 return NET_RX_DROP; 1471 return NET_RX_DROP;
1450 1472
1451 skb_dst_drop(skb); 1473 skb_set_dev(skb, dev);
1452 skb->tstamp.tv64 = 0; 1474 skb->tstamp.tv64 = 0;
1453 skb->pkt_type = PACKET_HOST; 1475 skb->pkt_type = PACKET_HOST;
1454 skb->protocol = eth_type_trans(skb, dev); 1476 skb->protocol = eth_type_trans(skb, dev);
1455 skb->mark = 0;
1456 secpath_reset(skb);
1457 nf_reset(skb);
1458 return netif_rx(skb); 1477 return netif_rx(skb);
1459} 1478}
1460EXPORT_SYMBOL_GPL(dev_forward_skb); 1479EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -1614,6 +1633,36 @@ static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1614 return false; 1633 return false;
1615} 1634}
1616 1635
1636/**
1637 * skb_dev_set -- assign a new device to a buffer
1638 * @skb: buffer for the new device
1639 * @dev: network device
1640 *
1641 * If an skb is owned by a device already, we have to reset
1642 * all data private to the namespace a device belongs to
1643 * before assigning it a new device.
1644 */
1645#ifdef CONFIG_NET_NS
1646void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1647{
1648 skb_dst_drop(skb);
1649 if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
1650 secpath_reset(skb);
1651 nf_reset(skb);
1652 skb_init_secmark(skb);
1653 skb->mark = 0;
1654 skb->priority = 0;
1655 skb->nf_trace = 0;
1656 skb->ipvs_property = 0;
1657#ifdef CONFIG_NET_SCHED
1658 skb->tc_index = 0;
1659#endif
1660 }
1661 skb->dev = dev;
1662}
1663EXPORT_SYMBOL(skb_set_dev);
1664#endif /* CONFIG_NET_NS */
1665
1617/* 1666/*
1618 * Invalidate hardware checksum when packet is to be mangled, and 1667 * Invalidate hardware checksum when packet is to be mangled, and
1619 * complete checksum manually on outgoing path. 1668 * complete checksum manually on outgoing path.
@@ -1853,6 +1902,14 @@ gso:
1853 1902
1854 skb->next = nskb->next; 1903 skb->next = nskb->next;
1855 nskb->next = NULL; 1904 nskb->next = NULL;
1905
1906 /*
1907 * If device doesnt need nskb->dst, release it right now while
1908 * its hot in this cpu cache
1909 */
1910 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1911 skb_dst_drop(nskb);
1912
1856 rc = ops->ndo_start_xmit(nskb, dev); 1913 rc = ops->ndo_start_xmit(nskb, dev);
1857 if (unlikely(rc != NETDEV_TX_OK)) { 1914 if (unlikely(rc != NETDEV_TX_OK)) {
1858 if (rc & ~NETDEV_TX_MASK) 1915 if (rc & ~NETDEV_TX_MASK)
@@ -1974,6 +2031,21 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
1974 return rc; 2031 return rc;
1975} 2032}
1976 2033
2034/*
2035 * Returns true if either:
2036 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2037 * 2. skb is fragmented and the device does not support SG, or if
2038 * at least one of fragments is in highmem and device does not
2039 * support DMA from it.
2040 */
2041static inline int skb_needs_linearize(struct sk_buff *skb,
2042 struct net_device *dev)
2043{
2044 return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
2045 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
2046 illegal_highdma(dev, skb)));
2047}
2048
1977/** 2049/**
1978 * dev_queue_xmit - transmit a buffer 2050 * dev_queue_xmit - transmit a buffer
1979 * @skb: buffer to transmit 2051 * @skb: buffer to transmit
@@ -2010,18 +2082,8 @@ int dev_queue_xmit(struct sk_buff *skb)
2010 if (netif_needs_gso(dev, skb)) 2082 if (netif_needs_gso(dev, skb))
2011 goto gso; 2083 goto gso;
2012 2084
2013 if (skb_has_frags(skb) && 2085 /* Convert a paged skb to linear, if required */
2014 !(dev->features & NETIF_F_FRAGLIST) && 2086 if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
2015 __skb_linearize(skb))
2016 goto out_kfree_skb;
2017
2018 /* Fragmented skb is linearized if device does not support SG,
2019 * or if at least one of fragments is in highmem and device
2020 * does not support DMA from it.
2021 */
2022 if (skb_shinfo(skb)->nr_frags &&
2023 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
2024 __skb_linearize(skb))
2025 goto out_kfree_skb; 2087 goto out_kfree_skb;
2026 2088
2027 /* If packet is not checksummed and device does not support 2089 /* If packet is not checksummed and device does not support
@@ -2041,7 +2103,7 @@ gso:
2041 rcu_read_lock_bh(); 2103 rcu_read_lock_bh();
2042 2104
2043 txq = dev_pick_tx(dev, skb); 2105 txq = dev_pick_tx(dev, skb);
2044 q = rcu_dereference(txq->qdisc); 2106 q = rcu_dereference_bh(txq->qdisc);
2045 2107
2046#ifdef CONFIG_NET_CLS_ACT 2108#ifdef CONFIG_NET_CLS_ACT
2047 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS); 2109 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
@@ -2421,7 +2483,9 @@ int netif_receive_skb(struct sk_buff *skb)
2421{ 2483{
2422 struct packet_type *ptype, *pt_prev; 2484 struct packet_type *ptype, *pt_prev;
2423 struct net_device *orig_dev; 2485 struct net_device *orig_dev;
2486 struct net_device *master;
2424 struct net_device *null_or_orig; 2487 struct net_device *null_or_orig;
2488 struct net_device *null_or_bond;
2425 int ret = NET_RX_DROP; 2489 int ret = NET_RX_DROP;
2426 __be16 type; 2490 __be16 type;
2427 2491
@@ -2440,11 +2504,12 @@ int netif_receive_skb(struct sk_buff *skb)
2440 2504
2441 null_or_orig = NULL; 2505 null_or_orig = NULL;
2442 orig_dev = skb->dev; 2506 orig_dev = skb->dev;
2443 if (orig_dev->master) { 2507 master = ACCESS_ONCE(orig_dev->master);
2444 if (skb_bond_should_drop(skb)) 2508 if (master) {
2509 if (skb_bond_should_drop(skb, master))
2445 null_or_orig = orig_dev; /* deliver only exact match */ 2510 null_or_orig = orig_dev; /* deliver only exact match */
2446 else 2511 else
2447 skb->dev = orig_dev->master; 2512 skb->dev = master;
2448 } 2513 }
2449 2514
2450 __get_cpu_var(netdev_rx_stat).total++; 2515 __get_cpu_var(netdev_rx_stat).total++;
@@ -2487,12 +2552,24 @@ ncls:
2487 if (!skb) 2552 if (!skb)
2488 goto out; 2553 goto out;
2489 2554
2555 /*
2556 * Make sure frames received on VLAN interfaces stacked on
2557 * bonding interfaces still make their way to any base bonding
2558 * device that may have registered for a specific ptype. The
2559 * handler may have to adjust skb->dev and orig_dev.
2560 */
2561 null_or_bond = NULL;
2562 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
2563 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
2564 null_or_bond = vlan_dev_real_dev(skb->dev);
2565 }
2566
2490 type = skb->protocol; 2567 type = skb->protocol;
2491 list_for_each_entry_rcu(ptype, 2568 list_for_each_entry_rcu(ptype,
2492 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { 2569 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2493 if (ptype->type == type && 2570 if (ptype->type == type && (ptype->dev == null_or_orig ||
2494 (ptype->dev == null_or_orig || ptype->dev == skb->dev || 2571 ptype->dev == skb->dev || ptype->dev == orig_dev ||
2495 ptype->dev == orig_dev)) { 2572 ptype->dev == null_or_bond)) {
2496 if (pt_prev) 2573 if (pt_prev)
2497 ret = deliver_skb(skb, pt_prev, orig_dev); 2574 ret = deliver_skb(skb, pt_prev, orig_dev);
2498 pt_prev = ptype; 2575 pt_prev = ptype;
@@ -2561,7 +2638,7 @@ out:
2561 return netif_receive_skb(skb); 2638 return netif_receive_skb(skb);
2562} 2639}
2563 2640
2564void napi_gro_flush(struct napi_struct *napi) 2641static void napi_gro_flush(struct napi_struct *napi)
2565{ 2642{
2566 struct sk_buff *skb, *next; 2643 struct sk_buff *skb, *next;
2567 2644
@@ -2574,7 +2651,6 @@ void napi_gro_flush(struct napi_struct *napi)
2574 napi->gro_count = 0; 2651 napi->gro_count = 0;
2575 napi->gro_list = NULL; 2652 napi->gro_list = NULL;
2576} 2653}
2577EXPORT_SYMBOL(napi_gro_flush);
2578 2654
2579enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 2655enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2580{ 2656{
@@ -2761,7 +2837,7 @@ gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
2761 switch (ret) { 2837 switch (ret) {
2762 case GRO_NORMAL: 2838 case GRO_NORMAL:
2763 case GRO_HELD: 2839 case GRO_HELD:
2764 skb->protocol = eth_type_trans(skb, napi->dev); 2840 skb->protocol = eth_type_trans(skb, skb->dev);
2765 2841
2766 if (ret == GRO_HELD) 2842 if (ret == GRO_HELD)
2767 skb_gro_pull(skb, -ETH_HLEN); 2843 skb_gro_pull(skb, -ETH_HLEN);
@@ -2966,7 +3042,7 @@ static void net_rx_action(struct softirq_action *h)
2966 * entries to the tail of this list, and only ->poll() 3042 * entries to the tail of this list, and only ->poll()
2967 * calls can remove this head entry from the list. 3043 * calls can remove this head entry from the list.
2968 */ 3044 */
2969 n = list_entry(list->next, struct napi_struct, poll_list); 3045 n = list_first_entry(list, struct napi_struct, poll_list);
2970 3046
2971 have = netpoll_poll_lock(n); 3047 have = netpoll_poll_lock(n);
2972 3048
@@ -3185,7 +3261,7 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3185{ 3261{
3186 const struct net_device_stats *stats = dev_get_stats(dev); 3262 const struct net_device_stats *stats = dev_get_stats(dev);
3187 3263
3188 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu " 3264 seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3189 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n", 3265 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3190 dev->name, stats->rx_bytes, stats->rx_packets, 3266 dev->name, stats->rx_bytes, stats->rx_packets,
3191 stats->rx_errors, 3267 stats->rx_errors,
@@ -3640,10 +3716,10 @@ void __dev_set_rx_mode(struct net_device *dev)
3640 /* Unicast addresses changes may only happen under the rtnl, 3716 /* Unicast addresses changes may only happen under the rtnl,
3641 * therefore calling __dev_set_promiscuity here is safe. 3717 * therefore calling __dev_set_promiscuity here is safe.
3642 */ 3718 */
3643 if (dev->uc.count > 0 && !dev->uc_promisc) { 3719 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
3644 __dev_set_promiscuity(dev, 1); 3720 __dev_set_promiscuity(dev, 1);
3645 dev->uc_promisc = 1; 3721 dev->uc_promisc = 1;
3646 } else if (dev->uc.count == 0 && dev->uc_promisc) { 3722 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
3647 __dev_set_promiscuity(dev, -1); 3723 __dev_set_promiscuity(dev, -1);
3648 dev->uc_promisc = 0; 3724 dev->uc_promisc = 0;
3649 } 3725 }
@@ -4211,7 +4287,7 @@ static void dev_addr_discard(struct net_device *dev)
4211 netif_addr_lock_bh(dev); 4287 netif_addr_lock_bh(dev);
4212 4288
4213 __dev_addr_discard(&dev->mc_list); 4289 __dev_addr_discard(&dev->mc_list);
4214 dev->mc_count = 0; 4290 netdev_mc_count(dev) = 0;
4215 4291
4216 netif_addr_unlock_bh(dev); 4292 netif_addr_unlock_bh(dev);
4217} 4293}
@@ -4247,18 +4323,10 @@ unsigned dev_get_flags(const struct net_device *dev)
4247} 4323}
4248EXPORT_SYMBOL(dev_get_flags); 4324EXPORT_SYMBOL(dev_get_flags);
4249 4325
4250/** 4326int __dev_change_flags(struct net_device *dev, unsigned int flags)
4251 * dev_change_flags - change device settings
4252 * @dev: device
4253 * @flags: device state flags
4254 *
4255 * Change settings on device based state flags. The flags are
4256 * in the userspace exported format.
4257 */
4258int dev_change_flags(struct net_device *dev, unsigned flags)
4259{ 4327{
4260 int ret, changes;
4261 int old_flags = dev->flags; 4328 int old_flags = dev->flags;
4329 int ret;
4262 4330
4263 ASSERT_RTNL(); 4331 ASSERT_RTNL();
4264 4332
@@ -4289,17 +4357,12 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
4289 4357
4290 ret = 0; 4358 ret = 0;
4291 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */ 4359 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4292 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev); 4360 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
4293 4361
4294 if (!ret) 4362 if (!ret)
4295 dev_set_rx_mode(dev); 4363 dev_set_rx_mode(dev);
4296 } 4364 }
4297 4365
4298 if (dev->flags & IFF_UP &&
4299 ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
4300 IFF_VOLATILE)))
4301 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4302
4303 if ((flags ^ dev->gflags) & IFF_PROMISC) { 4366 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4304 int inc = (flags & IFF_PROMISC) ? 1 : -1; 4367 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4305 4368
@@ -4318,11 +4381,47 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
4318 dev_set_allmulti(dev, inc); 4381 dev_set_allmulti(dev, inc);
4319 } 4382 }
4320 4383
4321 /* Exclude state transition flags, already notified */ 4384 return ret;
4322 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING); 4385}
4386
4387void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4388{
4389 unsigned int changes = dev->flags ^ old_flags;
4390
4391 if (changes & IFF_UP) {
4392 if (dev->flags & IFF_UP)
4393 call_netdevice_notifiers(NETDEV_UP, dev);
4394 else
4395 call_netdevice_notifiers(NETDEV_DOWN, dev);
4396 }
4397
4398 if (dev->flags & IFF_UP &&
4399 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4400 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4401}
4402
4403/**
4404 * dev_change_flags - change device settings
4405 * @dev: device
4406 * @flags: device state flags
4407 *
4408 * Change settings on device based state flags. The flags are
4409 * in the userspace exported format.
4410 */
4411int dev_change_flags(struct net_device *dev, unsigned flags)
4412{
4413 int ret, changes;
4414 int old_flags = dev->flags;
4415
4416 ret = __dev_change_flags(dev, flags);
4417 if (ret < 0)
4418 return ret;
4419
4420 changes = old_flags ^ dev->flags;
4323 if (changes) 4421 if (changes)
4324 rtmsg_ifinfo(RTM_NEWLINK, dev, changes); 4422 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4325 4423
4424 __dev_notify_flags(dev, old_flags);
4326 return ret; 4425 return ret;
4327} 4426}
4328EXPORT_SYMBOL(dev_change_flags); 4427EXPORT_SYMBOL(dev_change_flags);
@@ -4813,6 +4912,10 @@ static void rollback_registered_many(struct list_head *head)
4813 */ 4912 */
4814 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 4913 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4815 4914
4915 if (!dev->rtnl_link_ops ||
4916 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4917 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
4918
4816 /* 4919 /*
4817 * Flush the unicast and multicast chains 4920 * Flush the unicast and multicast chains
4818 */ 4921 */
@@ -4830,7 +4933,7 @@ static void rollback_registered_many(struct list_head *head)
4830 } 4933 }
4831 4934
4832 /* Process any work delayed until the end of the batch */ 4935 /* Process any work delayed until the end of the batch */
4833 dev = list_entry(head->next, struct net_device, unreg_list); 4936 dev = list_first_entry(head, struct net_device, unreg_list);
4834 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); 4937 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
4835 4938
4836 synchronize_net(); 4939 synchronize_net();
@@ -5035,6 +5138,13 @@ int register_netdevice(struct net_device *dev)
5035 rollback_registered(dev); 5138 rollback_registered(dev);
5036 dev->reg_state = NETREG_UNREGISTERED; 5139 dev->reg_state = NETREG_UNREGISTERED;
5037 } 5140 }
5141 /*
5142 * Prevent userspace races by waiting until the network
5143 * device is fully setup before sending notifications.
5144 */
5145 if (!dev->rtnl_link_ops ||
5146 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5147 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5038 5148
5039out: 5149out:
5040 return ret; 5150 return ret;
@@ -5211,7 +5321,7 @@ void netdev_run_todo(void)
5211 5321
5212 while (!list_empty(&list)) { 5322 while (!list_empty(&list)) {
5213 struct net_device *dev 5323 struct net_device *dev
5214 = list_entry(list.next, struct net_device, todo_list); 5324 = list_first_entry(&list, struct net_device, todo_list);
5215 list_del(&dev->todo_list); 5325 list_del(&dev->todo_list);
5216 5326
5217 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { 5327 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
@@ -5362,6 +5472,8 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5362 5472
5363 netdev_init_queues(dev); 5473 netdev_init_queues(dev);
5364 5474
5475 INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
5476 dev->ethtool_ntuple_list.count = 0;
5365 INIT_LIST_HEAD(&dev->napi_list); 5477 INIT_LIST_HEAD(&dev->napi_list);
5366 INIT_LIST_HEAD(&dev->unreg_list); 5478 INIT_LIST_HEAD(&dev->unreg_list);
5367 INIT_LIST_HEAD(&dev->link_watch_list); 5479 INIT_LIST_HEAD(&dev->link_watch_list);
@@ -5398,6 +5510,9 @@ void free_netdev(struct net_device *dev)
5398 /* Flush device addresses */ 5510 /* Flush device addresses */
5399 dev_addr_flush(dev); 5511 dev_addr_flush(dev);
5400 5512
5513 /* Clear ethtool n-tuple list */
5514 ethtool_ntuple_flush(dev);
5515
5401 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) 5516 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5402 netif_napi_del(p); 5517 netif_napi_del(p);
5403 5518
@@ -5597,6 +5712,12 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
5597 /* Notify protocols, that a new device appeared. */ 5712 /* Notify protocols, that a new device appeared. */
5598 call_netdevice_notifiers(NETDEV_REGISTER, dev); 5713 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5599 5714
5715 /*
5716 * Prevent userspace races by waiting until the network
5717 * device is fully setup before sending notifications.
5718 */
5719 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5720
5600 synchronize_net(); 5721 synchronize_net();
5601 err = 0; 5722 err = 0;
5602out: 5723out:
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 9e2fa39f22a3..3dc295beb483 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -96,7 +96,10 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
96 int err; 96 int err;
97 97
98 netif_addr_lock_bh(dev); 98 netif_addr_lock_bh(dev);
99 err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl); 99 if (alen != dev->addr_len)
100 err = -EINVAL;
101 else
102 err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
100 if (!err) 103 if (!err)
101 __dev_set_rx_mode(dev); 104 __dev_set_rx_mode(dev);
102 netif_addr_unlock_bh(dev); 105 netif_addr_unlock_bh(dev);
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index b8e9d3a86887..f8c874975350 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -296,7 +296,6 @@ static int dropmon_net_event(struct notifier_block *ev_block,
296 296
297 new_stat->dev = dev; 297 new_stat->dev = dev;
298 new_stat->last_rx = jiffies; 298 new_stat->last_rx = jiffies;
299 INIT_RCU_HEAD(&new_stat->rcu);
300 spin_lock(&trace_state_lock); 299 spin_lock(&trace_state_lock);
301 list_add_rcu(&new_stat->list, &hw_stats_list); 300 list_add_rcu(&new_stat->list, &hw_stats_list);
302 spin_unlock(&trace_state_lock); 301 spin_unlock(&trace_state_lock);
diff --git a/net/core/dst.c b/net/core/dst.c
index 57bc4d5b8d08..cb1b3488b739 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -17,6 +17,7 @@
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/types.h> 18#include <linux/types.h>
19#include <net/net_namespace.h> 19#include <net/net_namespace.h>
20#include <linux/sched.h>
20 21
21#include <net/dst.h> 22#include <net/dst.h>
22 23
@@ -79,6 +80,7 @@ loop:
79 while ((dst = next) != NULL) { 80 while ((dst = next) != NULL) {
80 next = dst->next; 81 next = dst->next;
81 prefetch(&next->next); 82 prefetch(&next->next);
83 cond_resched();
82 if (likely(atomic_read(&dst->__refcnt))) { 84 if (likely(atomic_read(&dst->__refcnt))) {
83 last->next = dst; 85 last->next = dst;
84 last = dst; 86 last = dst;
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index d8aee584e8d1..f4cb6b6299d9 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -17,6 +17,7 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/ethtool.h> 18#include <linux/ethtool.h>
19#include <linux/netdevice.h> 19#include <linux/netdevice.h>
20#include <linux/bitops.h>
20#include <asm/uaccess.h> 21#include <asm/uaccess.h>
21 22
22/* 23/*
@@ -120,7 +121,7 @@ int ethtool_op_set_ufo(struct net_device *dev, u32 data)
120 * NETIF_F_xxx values in include/linux/netdevice.h 121 * NETIF_F_xxx values in include/linux/netdevice.h
121 */ 122 */
122static const u32 flags_dup_features = 123static const u32 flags_dup_features =
123 ETH_FLAG_LRO; 124 (ETH_FLAG_LRO | ETH_FLAG_NTUPLE);
124 125
125u32 ethtool_op_get_flags(struct net_device *dev) 126u32 ethtool_op_get_flags(struct net_device *dev)
126{ 127{
@@ -134,19 +135,44 @@ u32 ethtool_op_get_flags(struct net_device *dev)
134 135
135int ethtool_op_set_flags(struct net_device *dev, u32 data) 136int ethtool_op_set_flags(struct net_device *dev, u32 data)
136{ 137{
138 const struct ethtool_ops *ops = dev->ethtool_ops;
139 unsigned long features = dev->features;
140
137 if (data & ETH_FLAG_LRO) 141 if (data & ETH_FLAG_LRO)
138 dev->features |= NETIF_F_LRO; 142 features |= NETIF_F_LRO;
139 else 143 else
140 dev->features &= ~NETIF_F_LRO; 144 features &= ~NETIF_F_LRO;
145
146 if (data & ETH_FLAG_NTUPLE) {
147 if (!ops->set_rx_ntuple)
148 return -EOPNOTSUPP;
149 features |= NETIF_F_NTUPLE;
150 } else {
151 /* safe to clear regardless */
152 features &= ~NETIF_F_NTUPLE;
153 }
141 154
155 dev->features = features;
142 return 0; 156 return 0;
143} 157}
144 158
159void ethtool_ntuple_flush(struct net_device *dev)
160{
161 struct ethtool_rx_ntuple_flow_spec_container *fsc, *f;
162
163 list_for_each_entry_safe(fsc, f, &dev->ethtool_ntuple_list.list, list) {
164 list_del(&fsc->list);
165 kfree(fsc);
166 }
167 dev->ethtool_ntuple_list.count = 0;
168}
169EXPORT_SYMBOL(ethtool_ntuple_flush);
170
145/* Handlers for each ethtool command */ 171/* Handlers for each ethtool command */
146 172
147static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) 173static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
148{ 174{
149 struct ethtool_cmd cmd = { ETHTOOL_GSET }; 175 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
150 int err; 176 int err;
151 177
152 if (!dev->ethtool_ops->get_settings) 178 if (!dev->ethtool_ops->get_settings)
@@ -174,7 +200,7 @@ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
174 return dev->ethtool_ops->set_settings(dev, &cmd); 200 return dev->ethtool_ops->set_settings(dev, &cmd);
175} 201}
176 202
177static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr) 203static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
178{ 204{
179 struct ethtool_drvinfo info; 205 struct ethtool_drvinfo info;
180 const struct ethtool_ops *ops = dev->ethtool_ops; 206 const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -186,6 +212,10 @@ static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
186 info.cmd = ETHTOOL_GDRVINFO; 212 info.cmd = ETHTOOL_GDRVINFO;
187 ops->get_drvinfo(dev, &info); 213 ops->get_drvinfo(dev, &info);
188 214
215 /*
216 * this method of obtaining string set info is deprecated;
217 * Use ETHTOOL_GSSET_INFO instead.
218 */
189 if (ops->get_sset_count) { 219 if (ops->get_sset_count) {
190 int rc; 220 int rc;
191 221
@@ -209,7 +239,67 @@ static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
209 return 0; 239 return 0;
210} 240}
211 241
212static int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr) 242static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
243 void __user *useraddr)
244{
245 struct ethtool_sset_info info;
246 const struct ethtool_ops *ops = dev->ethtool_ops;
247 u64 sset_mask;
248 int i, idx = 0, n_bits = 0, ret, rc;
249 u32 *info_buf = NULL;
250
251 if (!ops->get_sset_count)
252 return -EOPNOTSUPP;
253
254 if (copy_from_user(&info, useraddr, sizeof(info)))
255 return -EFAULT;
256
257 /* store copy of mask, because we zero struct later on */
258 sset_mask = info.sset_mask;
259 if (!sset_mask)
260 return 0;
261
262 /* calculate size of return buffer */
263 n_bits = hweight64(sset_mask);
264
265 memset(&info, 0, sizeof(info));
266 info.cmd = ETHTOOL_GSSET_INFO;
267
268 info_buf = kzalloc(n_bits * sizeof(u32), GFP_USER);
269 if (!info_buf)
270 return -ENOMEM;
271
272 /*
273 * fill return buffer based on input bitmask and successful
274 * get_sset_count return
275 */
276 for (i = 0; i < 64; i++) {
277 if (!(sset_mask & (1ULL << i)))
278 continue;
279
280 rc = ops->get_sset_count(dev, i);
281 if (rc >= 0) {
282 info.sset_mask |= (1ULL << i);
283 info_buf[idx++] = rc;
284 }
285 }
286
287 ret = -EFAULT;
288 if (copy_to_user(useraddr, &info, sizeof(info)))
289 goto out;
290
291 useraddr += offsetof(struct ethtool_sset_info, data);
292 if (copy_to_user(useraddr, info_buf, idx * sizeof(u32)))
293 goto out;
294
295 ret = 0;
296
297out:
298 kfree(info_buf);
299 return ret;
300}
301
302static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
213{ 303{
214 struct ethtool_rxnfc cmd; 304 struct ethtool_rxnfc cmd;
215 305
@@ -222,7 +312,7 @@ static int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
222 return dev->ethtool_ops->set_rxnfc(dev, &cmd); 312 return dev->ethtool_ops->set_rxnfc(dev, &cmd);
223} 313}
224 314
225static int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr) 315static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
226{ 316{
227 struct ethtool_rxnfc info; 317 struct ethtool_rxnfc info;
228 const struct ethtool_ops *ops = dev->ethtool_ops; 318 const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -266,6 +356,312 @@ err_out:
266 return ret; 356 return ret;
267} 357}
268 358
359static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
360 struct ethtool_rx_ntuple_flow_spec *spec,
361 struct ethtool_rx_ntuple_flow_spec_container *fsc)
362{
363
364 /* don't add filters forever */
365 if (list->count >= ETHTOOL_MAX_NTUPLE_LIST_ENTRY) {
366 /* free the container */
367 kfree(fsc);
368 return;
369 }
370
371 /* Copy the whole filter over */
372 fsc->fs.flow_type = spec->flow_type;
373 memcpy(&fsc->fs.h_u, &spec->h_u, sizeof(spec->h_u));
374 memcpy(&fsc->fs.m_u, &spec->m_u, sizeof(spec->m_u));
375
376 fsc->fs.vlan_tag = spec->vlan_tag;
377 fsc->fs.vlan_tag_mask = spec->vlan_tag_mask;
378 fsc->fs.data = spec->data;
379 fsc->fs.data_mask = spec->data_mask;
380 fsc->fs.action = spec->action;
381
382 /* add to the list */
383 list_add_tail_rcu(&fsc->list, &list->list);
384 list->count++;
385}
386
387static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr)
388{
389 struct ethtool_rx_ntuple cmd;
390 const struct ethtool_ops *ops = dev->ethtool_ops;
391 struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL;
392 int ret;
393
394 if (!(dev->features & NETIF_F_NTUPLE))
395 return -EINVAL;
396
397 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
398 return -EFAULT;
399
400 /*
401 * Cache filter in dev struct for GET operation only if
402 * the underlying driver doesn't have its own GET operation, and
403 * only if the filter was added successfully. First make sure we
404 * can allocate the filter, then continue if successful.
405 */
406 if (!ops->get_rx_ntuple) {
407 fsc = kmalloc(sizeof(*fsc), GFP_ATOMIC);
408 if (!fsc)
409 return -ENOMEM;
410 }
411
412 ret = ops->set_rx_ntuple(dev, &cmd);
413 if (ret) {
414 kfree(fsc);
415 return ret;
416 }
417
418 if (!ops->get_rx_ntuple)
419 __rx_ntuple_filter_add(&dev->ethtool_ntuple_list, &cmd.fs, fsc);
420
421 return ret;
422}
423
424static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
425{
426 struct ethtool_gstrings gstrings;
427 const struct ethtool_ops *ops = dev->ethtool_ops;
428 struct ethtool_rx_ntuple_flow_spec_container *fsc;
429 u8 *data;
430 char *p;
431 int ret, i, num_strings = 0;
432
433 if (!ops->get_sset_count)
434 return -EOPNOTSUPP;
435
436 if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
437 return -EFAULT;
438
439 ret = ops->get_sset_count(dev, gstrings.string_set);
440 if (ret < 0)
441 return ret;
442
443 gstrings.len = ret;
444
445 data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
446 if (!data)
447 return -ENOMEM;
448
449 if (ops->get_rx_ntuple) {
450 /* driver-specific filter grab */
451 ret = ops->get_rx_ntuple(dev, gstrings.string_set, data);
452 goto copy;
453 }
454
455 /* default ethtool filter grab */
456 i = 0;
457 p = (char *)data;
458 list_for_each_entry(fsc, &dev->ethtool_ntuple_list.list, list) {
459 sprintf(p, "Filter %d:\n", i);
460 p += ETH_GSTRING_LEN;
461 num_strings++;
462
463 switch (fsc->fs.flow_type) {
464 case TCP_V4_FLOW:
465 sprintf(p, "\tFlow Type: TCP\n");
466 p += ETH_GSTRING_LEN;
467 num_strings++;
468 break;
469 case UDP_V4_FLOW:
470 sprintf(p, "\tFlow Type: UDP\n");
471 p += ETH_GSTRING_LEN;
472 num_strings++;
473 break;
474 case SCTP_V4_FLOW:
475 sprintf(p, "\tFlow Type: SCTP\n");
476 p += ETH_GSTRING_LEN;
477 num_strings++;
478 break;
479 case AH_ESP_V4_FLOW:
480 sprintf(p, "\tFlow Type: AH ESP\n");
481 p += ETH_GSTRING_LEN;
482 num_strings++;
483 break;
484 case ESP_V4_FLOW:
485 sprintf(p, "\tFlow Type: ESP\n");
486 p += ETH_GSTRING_LEN;
487 num_strings++;
488 break;
489 case IP_USER_FLOW:
490 sprintf(p, "\tFlow Type: Raw IP\n");
491 p += ETH_GSTRING_LEN;
492 num_strings++;
493 break;
494 case IPV4_FLOW:
495 sprintf(p, "\tFlow Type: IPv4\n");
496 p += ETH_GSTRING_LEN;
497 num_strings++;
498 break;
499 default:
500 sprintf(p, "\tFlow Type: Unknown\n");
501 p += ETH_GSTRING_LEN;
502 num_strings++;
503 goto unknown_filter;
504 };
505
506 /* now the rest of the filters */
507 switch (fsc->fs.flow_type) {
508 case TCP_V4_FLOW:
509 case UDP_V4_FLOW:
510 case SCTP_V4_FLOW:
511 sprintf(p, "\tSrc IP addr: 0x%x\n",
512 fsc->fs.h_u.tcp_ip4_spec.ip4src);
513 p += ETH_GSTRING_LEN;
514 num_strings++;
515 sprintf(p, "\tSrc IP mask: 0x%x\n",
516 fsc->fs.m_u.tcp_ip4_spec.ip4src);
517 p += ETH_GSTRING_LEN;
518 num_strings++;
519 sprintf(p, "\tDest IP addr: 0x%x\n",
520 fsc->fs.h_u.tcp_ip4_spec.ip4dst);
521 p += ETH_GSTRING_LEN;
522 num_strings++;
523 sprintf(p, "\tDest IP mask: 0x%x\n",
524 fsc->fs.m_u.tcp_ip4_spec.ip4dst);
525 p += ETH_GSTRING_LEN;
526 num_strings++;
527 sprintf(p, "\tSrc Port: %d, mask: 0x%x\n",
528 fsc->fs.h_u.tcp_ip4_spec.psrc,
529 fsc->fs.m_u.tcp_ip4_spec.psrc);
530 p += ETH_GSTRING_LEN;
531 num_strings++;
532 sprintf(p, "\tDest Port: %d, mask: 0x%x\n",
533 fsc->fs.h_u.tcp_ip4_spec.pdst,
534 fsc->fs.m_u.tcp_ip4_spec.pdst);
535 p += ETH_GSTRING_LEN;
536 num_strings++;
537 sprintf(p, "\tTOS: %d, mask: 0x%x\n",
538 fsc->fs.h_u.tcp_ip4_spec.tos,
539 fsc->fs.m_u.tcp_ip4_spec.tos);
540 p += ETH_GSTRING_LEN;
541 num_strings++;
542 break;
543 case AH_ESP_V4_FLOW:
544 case ESP_V4_FLOW:
545 sprintf(p, "\tSrc IP addr: 0x%x\n",
546 fsc->fs.h_u.ah_ip4_spec.ip4src);
547 p += ETH_GSTRING_LEN;
548 num_strings++;
549 sprintf(p, "\tSrc IP mask: 0x%x\n",
550 fsc->fs.m_u.ah_ip4_spec.ip4src);
551 p += ETH_GSTRING_LEN;
552 num_strings++;
553 sprintf(p, "\tDest IP addr: 0x%x\n",
554 fsc->fs.h_u.ah_ip4_spec.ip4dst);
555 p += ETH_GSTRING_LEN;
556 num_strings++;
557 sprintf(p, "\tDest IP mask: 0x%x\n",
558 fsc->fs.m_u.ah_ip4_spec.ip4dst);
559 p += ETH_GSTRING_LEN;
560 num_strings++;
561 sprintf(p, "\tSPI: %d, mask: 0x%x\n",
562 fsc->fs.h_u.ah_ip4_spec.spi,
563 fsc->fs.m_u.ah_ip4_spec.spi);
564 p += ETH_GSTRING_LEN;
565 num_strings++;
566 sprintf(p, "\tTOS: %d, mask: 0x%x\n",
567 fsc->fs.h_u.ah_ip4_spec.tos,
568 fsc->fs.m_u.ah_ip4_spec.tos);
569 p += ETH_GSTRING_LEN;
570 num_strings++;
571 break;
572 case IP_USER_FLOW:
573 sprintf(p, "\tSrc IP addr: 0x%x\n",
574 fsc->fs.h_u.raw_ip4_spec.ip4src);
575 p += ETH_GSTRING_LEN;
576 num_strings++;
577 sprintf(p, "\tSrc IP mask: 0x%x\n",
578 fsc->fs.m_u.raw_ip4_spec.ip4src);
579 p += ETH_GSTRING_LEN;
580 num_strings++;
581 sprintf(p, "\tDest IP addr: 0x%x\n",
582 fsc->fs.h_u.raw_ip4_spec.ip4dst);
583 p += ETH_GSTRING_LEN;
584 num_strings++;
585 sprintf(p, "\tDest IP mask: 0x%x\n",
586 fsc->fs.m_u.raw_ip4_spec.ip4dst);
587 p += ETH_GSTRING_LEN;
588 num_strings++;
589 break;
590 case IPV4_FLOW:
591 sprintf(p, "\tSrc IP addr: 0x%x\n",
592 fsc->fs.h_u.usr_ip4_spec.ip4src);
593 p += ETH_GSTRING_LEN;
594 num_strings++;
595 sprintf(p, "\tSrc IP mask: 0x%x\n",
596 fsc->fs.m_u.usr_ip4_spec.ip4src);
597 p += ETH_GSTRING_LEN;
598 num_strings++;
599 sprintf(p, "\tDest IP addr: 0x%x\n",
600 fsc->fs.h_u.usr_ip4_spec.ip4dst);
601 p += ETH_GSTRING_LEN;
602 num_strings++;
603 sprintf(p, "\tDest IP mask: 0x%x\n",
604 fsc->fs.m_u.usr_ip4_spec.ip4dst);
605 p += ETH_GSTRING_LEN;
606 num_strings++;
607 sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n",
608 fsc->fs.h_u.usr_ip4_spec.l4_4_bytes,
609 fsc->fs.m_u.usr_ip4_spec.l4_4_bytes);
610 p += ETH_GSTRING_LEN;
611 num_strings++;
612 sprintf(p, "\tTOS: %d, mask: 0x%x\n",
613 fsc->fs.h_u.usr_ip4_spec.tos,
614 fsc->fs.m_u.usr_ip4_spec.tos);
615 p += ETH_GSTRING_LEN;
616 num_strings++;
617 sprintf(p, "\tIP Version: %d, mask: 0x%x\n",
618 fsc->fs.h_u.usr_ip4_spec.ip_ver,
619 fsc->fs.m_u.usr_ip4_spec.ip_ver);
620 p += ETH_GSTRING_LEN;
621 num_strings++;
622 sprintf(p, "\tProtocol: %d, mask: 0x%x\n",
623 fsc->fs.h_u.usr_ip4_spec.proto,
624 fsc->fs.m_u.usr_ip4_spec.proto);
625 p += ETH_GSTRING_LEN;
626 num_strings++;
627 break;
628 };
629 sprintf(p, "\tVLAN: %d, mask: 0x%x\n",
630 fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask);
631 p += ETH_GSTRING_LEN;
632 num_strings++;
633 sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data);
634 p += ETH_GSTRING_LEN;
635 num_strings++;
636 sprintf(p, "\tUser-defined mask: 0x%Lx\n", fsc->fs.data_mask);
637 p += ETH_GSTRING_LEN;
638 num_strings++;
639 if (fsc->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
640 sprintf(p, "\tAction: Drop\n");
641 else
642 sprintf(p, "\tAction: Direct to queue %d\n",
643 fsc->fs.action);
644 p += ETH_GSTRING_LEN;
645 num_strings++;
646unknown_filter:
647 i++;
648 }
649copy:
650 /* indicate to userspace how many strings we actually have */
651 gstrings.len = num_strings;
652 ret = -EFAULT;
653 if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
654 goto out;
655 useraddr += sizeof(gstrings);
656 if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
657 goto out;
658 ret = 0;
659
660out:
661 kfree(data);
662 return ret;
663}
664
269static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) 665static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
270{ 666{
271 struct ethtool_regs regs; 667 struct ethtool_regs regs;
@@ -324,7 +720,7 @@ static int ethtool_reset(struct net_device *dev, char __user *useraddr)
324 720
325static int ethtool_get_wol(struct net_device *dev, char __user *useraddr) 721static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
326{ 722{
327 struct ethtool_wolinfo wol = { ETHTOOL_GWOL }; 723 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
328 724
329 if (!dev->ethtool_ops->get_wol) 725 if (!dev->ethtool_ops->get_wol)
330 return -EOPNOTSUPP; 726 return -EOPNOTSUPP;
@@ -456,9 +852,9 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
456 return ret; 852 return ret;
457} 853}
458 854
459static int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr) 855static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
460{ 856{
461 struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE }; 857 struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE };
462 858
463 if (!dev->ethtool_ops->get_coalesce) 859 if (!dev->ethtool_ops->get_coalesce)
464 return -EOPNOTSUPP; 860 return -EOPNOTSUPP;
@@ -470,7 +866,7 @@ static int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
470 return 0; 866 return 0;
471} 867}
472 868
473static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr) 869static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
474{ 870{
475 struct ethtool_coalesce coalesce; 871 struct ethtool_coalesce coalesce;
476 872
@@ -485,7 +881,7 @@ static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
485 881
486static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr) 882static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
487{ 883{
488 struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM }; 884 struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM };
489 885
490 if (!dev->ethtool_ops->get_ringparam) 886 if (!dev->ethtool_ops->get_ringparam)
491 return -EOPNOTSUPP; 887 return -EOPNOTSUPP;
@@ -839,7 +1235,7 @@ static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr)
839static int ethtool_get_value(struct net_device *dev, char __user *useraddr, 1235static int ethtool_get_value(struct net_device *dev, char __user *useraddr,
840 u32 cmd, u32 (*actor)(struct net_device *)) 1236 u32 cmd, u32 (*actor)(struct net_device *))
841{ 1237{
842 struct ethtool_value edata = { cmd }; 1238 struct ethtool_value edata = { .cmd = cmd };
843 1239
844 if (!actor) 1240 if (!actor)
845 return -EOPNOTSUPP; 1241 return -EOPNOTSUPP;
@@ -880,7 +1276,7 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
880 return actor(dev, edata.data); 1276 return actor(dev, edata.data);
881} 1277}
882 1278
883static int ethtool_flash_device(struct net_device *dev, char __user *useraddr) 1279static noinline_for_stack int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
884{ 1280{
885 struct ethtool_flash efl; 1281 struct ethtool_flash efl;
886 1282
@@ -927,6 +1323,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
927 case ETHTOOL_GPERMADDR: 1323 case ETHTOOL_GPERMADDR:
928 case ETHTOOL_GUFO: 1324 case ETHTOOL_GUFO:
929 case ETHTOOL_GGSO: 1325 case ETHTOOL_GGSO:
1326 case ETHTOOL_GGRO:
930 case ETHTOOL_GFLAGS: 1327 case ETHTOOL_GFLAGS:
931 case ETHTOOL_GPFLAGS: 1328 case ETHTOOL_GPFLAGS:
932 case ETHTOOL_GRXFH: 1329 case ETHTOOL_GRXFH:
@@ -1112,6 +1509,15 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1112 case ETHTOOL_RESET: 1509 case ETHTOOL_RESET:
1113 rc = ethtool_reset(dev, useraddr); 1510 rc = ethtool_reset(dev, useraddr);
1114 break; 1511 break;
1512 case ETHTOOL_SRXNTUPLE:
1513 rc = ethtool_set_rx_ntuple(dev, useraddr);
1514 break;
1515 case ETHTOOL_GRXNTUPLE:
1516 rc = ethtool_get_rx_ntuple(dev, useraddr);
1517 break;
1518 case ETHTOOL_GSSET_INFO:
1519 rc = ethtool_get_sset_info(dev, useraddr);
1520 break;
1115 default: 1521 default:
1116 rc = -EOPNOTSUPP; 1522 rc = -EOPNOTSUPP;
1117 } 1523 }
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 02a3b2c69c1e..9a24377146bf 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -708,7 +708,7 @@ static struct notifier_block fib_rules_notifier = {
708 .notifier_call = fib_rules_event, 708 .notifier_call = fib_rules_event,
709}; 709};
710 710
711static int fib_rules_net_init(struct net *net) 711static int __net_init fib_rules_net_init(struct net *net)
712{ 712{
713 INIT_LIST_HEAD(&net->rules_ops); 713 INIT_LIST_HEAD(&net->rules_ops);
714 spin_lock_init(&net->rules_mod_lock); 714 spin_lock_init(&net->rules_mod_lock);
diff --git a/net/core/filter.c b/net/core/filter.c
index 08db7b9143a3..d38ef7fd50f0 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -86,7 +86,7 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
86 return err; 86 return err;
87 87
88 rcu_read_lock_bh(); 88 rcu_read_lock_bh();
89 filter = rcu_dereference(sk->sk_filter); 89 filter = rcu_dereference_bh(sk->sk_filter);
90 if (filter) { 90 if (filter) {
91 unsigned int pkt_len = sk_run_filter(skb, filter->insns, 91 unsigned int pkt_len = sk_run_filter(skb, filter->insns,
92 filter->len); 92 filter->len);
@@ -521,7 +521,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
521 } 521 }
522 522
523 rcu_read_lock_bh(); 523 rcu_read_lock_bh();
524 old_fp = rcu_dereference(sk->sk_filter); 524 old_fp = rcu_dereference_bh(sk->sk_filter);
525 rcu_assign_pointer(sk->sk_filter, fp); 525 rcu_assign_pointer(sk->sk_filter, fp);
526 rcu_read_unlock_bh(); 526 rcu_read_unlock_bh();
527 527
@@ -529,6 +529,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
529 sk_filter_delayed_uncharge(sk, old_fp); 529 sk_filter_delayed_uncharge(sk, old_fp);
530 return 0; 530 return 0;
531} 531}
532EXPORT_SYMBOL_GPL(sk_attach_filter);
532 533
533int sk_detach_filter(struct sock *sk) 534int sk_detach_filter(struct sock *sk)
534{ 535{
@@ -536,7 +537,7 @@ int sk_detach_filter(struct sock *sk)
536 struct sk_filter *filter; 537 struct sk_filter *filter;
537 538
538 rcu_read_lock_bh(); 539 rcu_read_lock_bh();
539 filter = rcu_dereference(sk->sk_filter); 540 filter = rcu_dereference_bh(sk->sk_filter);
540 if (filter) { 541 if (filter) {
541 rcu_assign_pointer(sk->sk_filter, NULL); 542 rcu_assign_pointer(sk->sk_filter, NULL);
542 sk_filter_delayed_uncharge(sk, filter); 543 sk_filter_delayed_uncharge(sk, filter);
@@ -545,3 +546,4 @@ int sk_detach_filter(struct sock *sk)
545 rcu_read_unlock_bh(); 546 rcu_read_unlock_bh();
546 return ret; 547 return ret;
547} 548}
549EXPORT_SYMBOL_GPL(sk_detach_filter);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f35377b643e4..6cee6434da67 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -771,6 +771,8 @@ static __inline__ int neigh_max_probes(struct neighbour *n)
771} 771}
772 772
773static void neigh_invalidate(struct neighbour *neigh) 773static void neigh_invalidate(struct neighbour *neigh)
774 __releases(neigh->lock)
775 __acquires(neigh->lock)
774{ 776{
775 struct sk_buff *skb; 777 struct sk_buff *skb;
776 778
@@ -2417,8 +2419,7 @@ EXPORT_SYMBOL(neigh_seq_stop);
2417 2419
2418static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos) 2420static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2419{ 2421{
2420 struct proc_dir_entry *pde = seq->private; 2422 struct neigh_table *tbl = seq->private;
2421 struct neigh_table *tbl = pde->data;
2422 int cpu; 2423 int cpu;
2423 2424
2424 if (*pos == 0) 2425 if (*pos == 0)
@@ -2435,8 +2436,7 @@ static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2435 2436
2436static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2437static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2437{ 2438{
2438 struct proc_dir_entry *pde = seq->private; 2439 struct neigh_table *tbl = seq->private;
2439 struct neigh_table *tbl = pde->data;
2440 int cpu; 2440 int cpu;
2441 2441
2442 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { 2442 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
@@ -2455,8 +2455,7 @@ static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2455 2455
2456static int neigh_stat_seq_show(struct seq_file *seq, void *v) 2456static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2457{ 2457{
2458 struct proc_dir_entry *pde = seq->private; 2458 struct neigh_table *tbl = seq->private;
2459 struct neigh_table *tbl = pde->data;
2460 struct neigh_statistics *st = v; 2459 struct neigh_statistics *st = v;
2461 2460
2462 if (v == SEQ_START_TOKEN) { 2461 if (v == SEQ_START_TOKEN) {
@@ -2501,7 +2500,7 @@ static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2501 2500
2502 if (!ret) { 2501 if (!ret) {
2503 struct seq_file *sf = file->private_data; 2502 struct seq_file *sf = file->private_data;
2504 sf->private = PDE(inode); 2503 sf->private = PDE(inode)->data;
2505 } 2504 }
2506 return ret; 2505 return ret;
2507}; 2506};
@@ -2559,9 +2558,11 @@ EXPORT_SYMBOL(neigh_app_ns);
2559 2558
2560#ifdef CONFIG_SYSCTL 2559#ifdef CONFIG_SYSCTL
2561 2560
2561#define NEIGH_VARS_MAX 19
2562
2562static struct neigh_sysctl_table { 2563static struct neigh_sysctl_table {
2563 struct ctl_table_header *sysctl_header; 2564 struct ctl_table_header *sysctl_header;
2564 struct ctl_table neigh_vars[__NET_NEIGH_MAX]; 2565 struct ctl_table neigh_vars[NEIGH_VARS_MAX];
2565 char *dev_name; 2566 char *dev_name;
2566} neigh_sysctl_template __read_mostly = { 2567} neigh_sysctl_template __read_mostly = {
2567 .neigh_vars = { 2568 .neigh_vars = {
@@ -2678,8 +2679,7 @@ static struct neigh_sysctl_table {
2678}; 2679};
2679 2680
2680int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, 2681int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2681 int p_id, int pdev_id, char *p_name, 2682 char *p_name, proc_handler *handler)
2682 proc_handler *handler)
2683{ 2683{
2684 struct neigh_sysctl_table *t; 2684 struct neigh_sysctl_table *t;
2685 const char *dev_name_source = NULL; 2685 const char *dev_name_source = NULL;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index fbc1c7472c5e..099c753c4213 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -410,7 +410,8 @@ static ssize_t wireless_show(struct device *d, char *buf,
410 const struct iw_statistics *iw; 410 const struct iw_statistics *iw;
411 ssize_t ret = -EINVAL; 411 ssize_t ret = -EINVAL;
412 412
413 rtnl_lock(); 413 if (!rtnl_trylock())
414 return restart_syscall();
414 if (dev_isalive(dev)) { 415 if (dev_isalive(dev)) {
415 iw = get_wireless_stats(dev); 416 iw = get_wireless_stats(dev);
416 if (iw) 417 if (iw)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 0b4d0d35ef40..6f9206b36dc2 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -407,11 +407,24 @@ static void arp_reply(struct sk_buff *skb)
407 __be32 sip, tip; 407 __be32 sip, tip;
408 unsigned char *sha; 408 unsigned char *sha;
409 struct sk_buff *send_skb; 409 struct sk_buff *send_skb;
410 struct netpoll *np = NULL; 410 struct netpoll *np, *tmp;
411 unsigned long flags;
412 int hits = 0;
413
414 if (list_empty(&npinfo->rx_np))
415 return;
416
417 /* Before checking the packet, we do some early
418 inspection whether this is interesting at all */
419 spin_lock_irqsave(&npinfo->rx_lock, flags);
420 list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
421 if (np->dev == skb->dev)
422 hits++;
423 }
424 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
411 425
412 if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev) 426 /* No netpoll struct is using this dev */
413 np = npinfo->rx_np; 427 if (!hits)
414 if (!np)
415 return; 428 return;
416 429
417 /* No arp on this interface */ 430 /* No arp on this interface */
@@ -437,77 +450,91 @@ static void arp_reply(struct sk_buff *skb)
437 arp_ptr += skb->dev->addr_len; 450 arp_ptr += skb->dev->addr_len;
438 memcpy(&sip, arp_ptr, 4); 451 memcpy(&sip, arp_ptr, 4);
439 arp_ptr += 4; 452 arp_ptr += 4;
440 /* if we actually cared about dst hw addr, it would get copied here */ 453 /* If we actually cared about dst hw addr,
454 it would get copied here */
441 arp_ptr += skb->dev->addr_len; 455 arp_ptr += skb->dev->addr_len;
442 memcpy(&tip, arp_ptr, 4); 456 memcpy(&tip, arp_ptr, 4);
443 457
444 /* Should we ignore arp? */ 458 /* Should we ignore arp? */
445 if (tip != np->local_ip || 459 if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
446 ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
447 return; 460 return;
448 461
449 size = arp_hdr_len(skb->dev); 462 size = arp_hdr_len(skb->dev);
450 send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
451 LL_RESERVED_SPACE(np->dev));
452 463
453 if (!send_skb) 464 spin_lock_irqsave(&npinfo->rx_lock, flags);
454 return; 465 list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
455 466 if (tip != np->local_ip)
456 skb_reset_network_header(send_skb); 467 continue;
457 arp = (struct arphdr *) skb_put(send_skb, size);
458 send_skb->dev = skb->dev;
459 send_skb->protocol = htons(ETH_P_ARP);
460 468
461 /* Fill the device header for the ARP frame */ 469 send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
462 if (dev_hard_header(send_skb, skb->dev, ptype, 470 LL_RESERVED_SPACE(np->dev));
463 sha, np->dev->dev_addr, 471 if (!send_skb)
464 send_skb->len) < 0) { 472 continue;
465 kfree_skb(send_skb);
466 return;
467 }
468 473
469 /* 474 skb_reset_network_header(send_skb);
470 * Fill out the arp protocol part. 475 arp = (struct arphdr *) skb_put(send_skb, size);
471 * 476 send_skb->dev = skb->dev;
472 * we only support ethernet device type, 477 send_skb->protocol = htons(ETH_P_ARP);
473 * which (according to RFC 1390) should always equal 1 (Ethernet).
474 */
475 478
476 arp->ar_hrd = htons(np->dev->type); 479 /* Fill the device header for the ARP frame */
477 arp->ar_pro = htons(ETH_P_IP); 480 if (dev_hard_header(send_skb, skb->dev, ptype,
478 arp->ar_hln = np->dev->addr_len; 481 sha, np->dev->dev_addr,
479 arp->ar_pln = 4; 482 send_skb->len) < 0) {
480 arp->ar_op = htons(type); 483 kfree_skb(send_skb);
484 continue;
485 }
481 486
482 arp_ptr=(unsigned char *)(arp + 1); 487 /*
483 memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len); 488 * Fill out the arp protocol part.
484 arp_ptr += np->dev->addr_len; 489 *
485 memcpy(arp_ptr, &tip, 4); 490 * we only support ethernet device type,
486 arp_ptr += 4; 491 * which (according to RFC 1390) should
487 memcpy(arp_ptr, sha, np->dev->addr_len); 492 * always equal 1 (Ethernet).
488 arp_ptr += np->dev->addr_len; 493 */
489 memcpy(arp_ptr, &sip, 4);
490 494
491 netpoll_send_skb(np, send_skb); 495 arp->ar_hrd = htons(np->dev->type);
496 arp->ar_pro = htons(ETH_P_IP);
497 arp->ar_hln = np->dev->addr_len;
498 arp->ar_pln = 4;
499 arp->ar_op = htons(type);
500
501 arp_ptr = (unsigned char *)(arp + 1);
502 memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
503 arp_ptr += np->dev->addr_len;
504 memcpy(arp_ptr, &tip, 4);
505 arp_ptr += 4;
506 memcpy(arp_ptr, sha, np->dev->addr_len);
507 arp_ptr += np->dev->addr_len;
508 memcpy(arp_ptr, &sip, 4);
509
510 netpoll_send_skb(np, send_skb);
511
512 /* If there are several rx_hooks for the same address,
513 we're fine by sending a single reply */
514 break;
515 }
516 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
492} 517}
493 518
494int __netpoll_rx(struct sk_buff *skb) 519int __netpoll_rx(struct sk_buff *skb)
495{ 520{
496 int proto, len, ulen; 521 int proto, len, ulen;
522 int hits = 0;
497 struct iphdr *iph; 523 struct iphdr *iph;
498 struct udphdr *uh; 524 struct udphdr *uh;
499 struct netpoll_info *npi = skb->dev->npinfo; 525 struct netpoll_info *npinfo = skb->dev->npinfo;
500 struct netpoll *np = npi->rx_np; 526 struct netpoll *np, *tmp;
501 527
502 if (!np) 528 if (list_empty(&npinfo->rx_np))
503 goto out; 529 goto out;
530
504 if (skb->dev->type != ARPHRD_ETHER) 531 if (skb->dev->type != ARPHRD_ETHER)
505 goto out; 532 goto out;
506 533
507 /* check if netpoll clients need ARP */ 534 /* check if netpoll clients need ARP */
508 if (skb->protocol == htons(ETH_P_ARP) && 535 if (skb->protocol == htons(ETH_P_ARP) &&
509 atomic_read(&trapped)) { 536 atomic_read(&trapped)) {
510 skb_queue_tail(&npi->arp_tx, skb); 537 skb_queue_tail(&npinfo->arp_tx, skb);
511 return 1; 538 return 1;
512 } 539 }
513 540
@@ -551,16 +578,23 @@ int __netpoll_rx(struct sk_buff *skb)
551 goto out; 578 goto out;
552 if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr)) 579 if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
553 goto out; 580 goto out;
554 if (np->local_ip && np->local_ip != iph->daddr)
555 goto out;
556 if (np->remote_ip && np->remote_ip != iph->saddr)
557 goto out;
558 if (np->local_port && np->local_port != ntohs(uh->dest))
559 goto out;
560 581
561 np->rx_hook(np, ntohs(uh->source), 582 list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
562 (char *)(uh+1), 583 if (np->local_ip && np->local_ip != iph->daddr)
563 ulen - sizeof(struct udphdr)); 584 continue;
585 if (np->remote_ip && np->remote_ip != iph->saddr)
586 continue;
587 if (np->local_port && np->local_port != ntohs(uh->dest))
588 continue;
589
590 np->rx_hook(np, ntohs(uh->source),
591 (char *)(uh+1),
592 ulen - sizeof(struct udphdr));
593 hits++;
594 }
595
596 if (!hits)
597 goto out;
564 598
565 kfree_skb(skb); 599 kfree_skb(skb);
566 return 1; 600 return 1;
@@ -580,7 +614,7 @@ void netpoll_print_options(struct netpoll *np)
580 np->name, np->local_port); 614 np->name, np->local_port);
581 printk(KERN_INFO "%s: local IP %pI4\n", 615 printk(KERN_INFO "%s: local IP %pI4\n",
582 np->name, &np->local_ip); 616 np->name, &np->local_ip);
583 printk(KERN_INFO "%s: interface %s\n", 617 printk(KERN_INFO "%s: interface '%s'\n",
584 np->name, np->dev_name); 618 np->name, np->dev_name);
585 printk(KERN_INFO "%s: remote port %d\n", 619 printk(KERN_INFO "%s: remote port %d\n",
586 np->name, np->remote_port); 620 np->name, np->remote_port);
@@ -627,6 +661,9 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
627 if ((delim = strchr(cur, '@')) == NULL) 661 if ((delim = strchr(cur, '@')) == NULL)
628 goto parse_failed; 662 goto parse_failed;
629 *delim = 0; 663 *delim = 0;
664 if (*cur == ' ' || *cur == '\t')
665 printk(KERN_INFO "%s: warning: whitespace"
666 "is not allowed\n", np->name);
630 np->remote_port = simple_strtol(cur, NULL, 10); 667 np->remote_port = simple_strtol(cur, NULL, 10);
631 cur = delim; 668 cur = delim;
632 } 669 }
@@ -674,7 +711,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
674 return 0; 711 return 0;
675 712
676 parse_failed: 713 parse_failed:
677 printk(KERN_INFO "%s: couldn't parse config at %s!\n", 714 printk(KERN_INFO "%s: couldn't parse config at '%s'!\n",
678 np->name, cur); 715 np->name, cur);
679 return -1; 716 return -1;
680} 717}
@@ -684,6 +721,7 @@ int netpoll_setup(struct netpoll *np)
684 struct net_device *ndev = NULL; 721 struct net_device *ndev = NULL;
685 struct in_device *in_dev; 722 struct in_device *in_dev;
686 struct netpoll_info *npinfo; 723 struct netpoll_info *npinfo;
724 struct netpoll *npe, *tmp;
687 unsigned long flags; 725 unsigned long flags;
688 int err; 726 int err;
689 727
@@ -700,11 +738,11 @@ int netpoll_setup(struct netpoll *np)
700 npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); 738 npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
701 if (!npinfo) { 739 if (!npinfo) {
702 err = -ENOMEM; 740 err = -ENOMEM;
703 goto release; 741 goto put;
704 } 742 }
705 743
706 npinfo->rx_flags = 0; 744 npinfo->rx_flags = 0;
707 npinfo->rx_np = NULL; 745 INIT_LIST_HEAD(&npinfo->rx_np);
708 746
709 spin_lock_init(&npinfo->rx_lock); 747 spin_lock_init(&npinfo->rx_lock);
710 skb_queue_head_init(&npinfo->arp_tx); 748 skb_queue_head_init(&npinfo->arp_tx);
@@ -785,7 +823,7 @@ int netpoll_setup(struct netpoll *np)
785 if (np->rx_hook) { 823 if (np->rx_hook) {
786 spin_lock_irqsave(&npinfo->rx_lock, flags); 824 spin_lock_irqsave(&npinfo->rx_lock, flags);
787 npinfo->rx_flags |= NETPOLL_RX_ENABLED; 825 npinfo->rx_flags |= NETPOLL_RX_ENABLED;
788 npinfo->rx_np = np; 826 list_add_tail(&np->rx, &npinfo->rx_np);
789 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 827 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
790 } 828 }
791 829
@@ -801,9 +839,16 @@ int netpoll_setup(struct netpoll *np)
801 return 0; 839 return 0;
802 840
803 release: 841 release:
804 if (!ndev->npinfo) 842 if (!ndev->npinfo) {
843 spin_lock_irqsave(&npinfo->rx_lock, flags);
844 list_for_each_entry_safe(npe, tmp, &npinfo->rx_np, rx) {
845 npe->dev = NULL;
846 }
847 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
848
805 kfree(npinfo); 849 kfree(npinfo);
806 np->dev = NULL; 850 }
851put:
807 dev_put(ndev); 852 dev_put(ndev);
808 return err; 853 return err;
809} 854}
@@ -823,10 +868,11 @@ void netpoll_cleanup(struct netpoll *np)
823 if (np->dev) { 868 if (np->dev) {
824 npinfo = np->dev->npinfo; 869 npinfo = np->dev->npinfo;
825 if (npinfo) { 870 if (npinfo) {
826 if (npinfo->rx_np == np) { 871 if (!list_empty(&npinfo->rx_np)) {
827 spin_lock_irqsave(&npinfo->rx_lock, flags); 872 spin_lock_irqsave(&npinfo->rx_lock, flags);
828 npinfo->rx_np = NULL; 873 list_del(&np->rx);
829 npinfo->rx_flags &= ~NETPOLL_RX_ENABLED; 874 if (list_empty(&npinfo->rx_np))
875 npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
830 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 876 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
831 } 877 }
832 878
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index a23b45f08ec9..43923811bd6a 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -250,8 +250,7 @@ struct pktgen_dev {
250 __u64 count; /* Default No packets to send */ 250 __u64 count; /* Default No packets to send */
251 __u64 sofar; /* How many pkts we've sent so far */ 251 __u64 sofar; /* How many pkts we've sent so far */
252 __u64 tx_bytes; /* How many bytes we've transmitted */ 252 __u64 tx_bytes; /* How many bytes we've transmitted */
253 __u64 errors; /* Errors when trying to transmit, 253 __u64 errors; /* Errors when trying to transmit, */
254 pkts will be re-sent */
255 254
256 /* runtime counters relating to clone_skb */ 255 /* runtime counters relating to clone_skb */
257 256
@@ -2189,12 +2188,13 @@ static inline int f_pick(struct pktgen_dev *pkt_dev)
2189/* If there was already an IPSEC SA, we keep it as is, else 2188/* If there was already an IPSEC SA, we keep it as is, else
2190 * we go look for it ... 2189 * we go look for it ...
2191*/ 2190*/
2191#define DUMMY_MARK 0
2192static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow) 2192static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
2193{ 2193{
2194 struct xfrm_state *x = pkt_dev->flows[flow].x; 2194 struct xfrm_state *x = pkt_dev->flows[flow].x;
2195 if (!x) { 2195 if (!x) {
2196 /*slow path: we dont already have xfrm_state*/ 2196 /*slow path: we dont already have xfrm_state*/
2197 x = xfrm_stateonly_find(&init_net, 2197 x = xfrm_stateonly_find(&init_net, DUMMY_MARK,
2198 (xfrm_address_t *)&pkt_dev->cur_daddr, 2198 (xfrm_address_t *)&pkt_dev->cur_daddr,
2199 (xfrm_address_t *)&pkt_dev->cur_saddr, 2199 (xfrm_address_t *)&pkt_dev->cur_saddr,
2200 AF_INET, 2200 AF_INET,
@@ -3465,6 +3465,12 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
3465 pkt_dev->seq_num++; 3465 pkt_dev->seq_num++;
3466 pkt_dev->tx_bytes += pkt_dev->last_pkt_size; 3466 pkt_dev->tx_bytes += pkt_dev->last_pkt_size;
3467 break; 3467 break;
3468 case NET_XMIT_DROP:
3469 case NET_XMIT_CN:
3470 case NET_XMIT_POLICED:
3471 /* skb has been consumed */
3472 pkt_dev->errors++;
3473 break;
3468 default: /* Drivers are not supposed to return other values! */ 3474 default: /* Drivers are not supposed to return other values! */
3469 if (net_ratelimit()) 3475 if (net_ratelimit())
3470 pr_info("pktgen: %s xmit error: %d\n", 3476 pr_info("pktgen: %s xmit error: %d\n",
@@ -3519,6 +3525,7 @@ static int pktgen_thread_worker(void *arg)
3519 wait_event_interruptible_timeout(t->queue, 3525 wait_event_interruptible_timeout(t->queue,
3520 t->control != 0, 3526 t->control != 0,
3521 HZ/10); 3527 HZ/10);
3528 try_to_freeze();
3522 continue; 3529 continue;
3523 } 3530 }
3524 3531
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 33148a568199..4568120d8533 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -35,6 +35,7 @@
35#include <linux/security.h> 35#include <linux/security.h>
36#include <linux/mutex.h> 36#include <linux/mutex.h>
37#include <linux/if_addr.h> 37#include <linux/if_addr.h>
38#include <linux/pci.h>
38 39
39#include <asm/uaccess.h> 40#include <asm/uaccess.h>
40#include <asm/system.h> 41#include <asm/system.h>
@@ -89,6 +90,14 @@ int rtnl_is_locked(void)
89} 90}
90EXPORT_SYMBOL(rtnl_is_locked); 91EXPORT_SYMBOL(rtnl_is_locked);
91 92
93#ifdef CONFIG_PROVE_LOCKING
94int lockdep_rtnl_is_held(void)
95{
96 return lockdep_is_held(&rtnl_mutex);
97}
98EXPORT_SYMBOL(lockdep_rtnl_is_held);
99#endif /* #ifdef CONFIG_PROVE_LOCKING */
100
92static struct rtnl_link *rtnl_msg_handlers[NPROTO]; 101static struct rtnl_link *rtnl_msg_handlers[NPROTO];
93 102
94static inline int rtm_msgindex(int msgtype) 103static inline int rtm_msgindex(int msgtype)
@@ -548,6 +557,19 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
548 } 557 }
549} 558}
550 559
560static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
561 const struct ifinfomsg *ifm)
562{
563 unsigned int flags = ifm->ifi_flags;
564
565 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
566 if (ifm->ifi_change)
567 flags = (flags & ifm->ifi_change) |
568 (dev->flags & ~ifm->ifi_change);
569
570 return flags;
571}
572
551static void copy_rtnl_link_stats(struct rtnl_link_stats *a, 573static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
552 const struct net_device_stats *b) 574 const struct net_device_stats *b)
553{ 575{
@@ -580,6 +602,15 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
580 a->tx_compressed = b->tx_compressed; 602 a->tx_compressed = b->tx_compressed;
581}; 603};
582 604
605static inline int rtnl_vfinfo_size(const struct net_device *dev)
606{
607 if (dev->dev.parent && dev_is_pci(dev->dev.parent))
608 return dev_num_vf(dev->dev.parent) *
609 sizeof(struct ifla_vf_info);
610 else
611 return 0;
612}
613
583static inline size_t if_nlmsg_size(const struct net_device *dev) 614static inline size_t if_nlmsg_size(const struct net_device *dev)
584{ 615{
585 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 616 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
@@ -597,6 +628,8 @@ static inline size_t if_nlmsg_size(const struct net_device *dev)
597 + nla_total_size(4) /* IFLA_MASTER */ 628 + nla_total_size(4) /* IFLA_MASTER */
598 + nla_total_size(1) /* IFLA_OPERSTATE */ 629 + nla_total_size(1) /* IFLA_OPERSTATE */
599 + nla_total_size(1) /* IFLA_LINKMODE */ 630 + nla_total_size(1) /* IFLA_LINKMODE */
631 + nla_total_size(4) /* IFLA_NUM_VF */
632 + nla_total_size(rtnl_vfinfo_size(dev)) /* IFLA_VFINFO */
600 + rtnl_link_get_size(dev); /* IFLA_LINKINFO */ 633 + rtnl_link_get_size(dev); /* IFLA_LINKINFO */
601} 634}
602 635
@@ -665,6 +698,17 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
665 stats = dev_get_stats(dev); 698 stats = dev_get_stats(dev);
666 copy_rtnl_link_stats(nla_data(attr), stats); 699 copy_rtnl_link_stats(nla_data(attr), stats);
667 700
701 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
702 int i;
703 struct ifla_vf_info ivi;
704
705 NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
706 for (i = 0; i < dev_num_vf(dev->dev.parent); i++) {
707 if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
708 break;
709 NLA_PUT(skb, IFLA_VFINFO, sizeof(ivi), &ivi);
710 }
711 }
668 if (dev->rtnl_link_ops) { 712 if (dev->rtnl_link_ops) {
669 if (rtnl_link_fill(skb, dev) < 0) 713 if (rtnl_link_fill(skb, dev) < 0)
670 goto nla_put_failure; 714 goto nla_put_failure;
@@ -725,6 +769,12 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
725 [IFLA_LINKINFO] = { .type = NLA_NESTED }, 769 [IFLA_LINKINFO] = { .type = NLA_NESTED },
726 [IFLA_NET_NS_PID] = { .type = NLA_U32 }, 770 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
727 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, 771 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
772 [IFLA_VF_MAC] = { .type = NLA_BINARY,
773 .len = sizeof(struct ifla_vf_mac) },
774 [IFLA_VF_VLAN] = { .type = NLA_BINARY,
775 .len = sizeof(struct ifla_vf_vlan) },
776 [IFLA_VF_TX_RATE] = { .type = NLA_BINARY,
777 .len = sizeof(struct ifla_vf_tx_rate) },
728}; 778};
729EXPORT_SYMBOL(ifla_policy); 779EXPORT_SYMBOL(ifla_policy);
730 780
@@ -875,13 +925,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
875 } 925 }
876 926
877 if (ifm->ifi_flags || ifm->ifi_change) { 927 if (ifm->ifi_flags || ifm->ifi_change) {
878 unsigned int flags = ifm->ifi_flags; 928 err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
879
880 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
881 if (ifm->ifi_change)
882 flags = (flags & ifm->ifi_change) |
883 (dev->flags & ~ifm->ifi_change);
884 err = dev_change_flags(dev, flags);
885 if (err < 0) 929 if (err < 0)
886 goto errout; 930 goto errout;
887 } 931 }
@@ -898,6 +942,41 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
898 write_unlock_bh(&dev_base_lock); 942 write_unlock_bh(&dev_base_lock);
899 } 943 }
900 944
945 if (tb[IFLA_VF_MAC]) {
946 struct ifla_vf_mac *ivm;
947 ivm = nla_data(tb[IFLA_VF_MAC]);
948 err = -EOPNOTSUPP;
949 if (ops->ndo_set_vf_mac)
950 err = ops->ndo_set_vf_mac(dev, ivm->vf, ivm->mac);
951 if (err < 0)
952 goto errout;
953 modified = 1;
954 }
955
956 if (tb[IFLA_VF_VLAN]) {
957 struct ifla_vf_vlan *ivv;
958 ivv = nla_data(tb[IFLA_VF_VLAN]);
959 err = -EOPNOTSUPP;
960 if (ops->ndo_set_vf_vlan)
961 err = ops->ndo_set_vf_vlan(dev, ivv->vf,
962 ivv->vlan,
963 ivv->qos);
964 if (err < 0)
965 goto errout;
966 modified = 1;
967 }
968 err = 0;
969
970 if (tb[IFLA_VF_TX_RATE]) {
971 struct ifla_vf_tx_rate *ivt;
972 ivt = nla_data(tb[IFLA_VF_TX_RATE]);
973 err = -EOPNOTSUPP;
974 if (ops->ndo_set_vf_tx_rate)
975 err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, ivt->rate);
976 if (err < 0)
977 goto errout;
978 modified = 1;
979 }
901 err = 0; 980 err = 0;
902 981
903errout: 982errout:
@@ -989,6 +1068,26 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
989 return 0; 1068 return 0;
990} 1069}
991 1070
1071int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
1072{
1073 unsigned int old_flags;
1074 int err;
1075
1076 old_flags = dev->flags;
1077 if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
1078 err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
1079 if (err < 0)
1080 return err;
1081 }
1082
1083 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
1084 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
1085
1086 __dev_notify_flags(dev, old_flags);
1087 return 0;
1088}
1089EXPORT_SYMBOL(rtnl_configure_link);
1090
992struct net_device *rtnl_create_link(struct net *src_net, struct net *net, 1091struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
993 char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]) 1092 char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[])
994{ 1093{
@@ -1010,6 +1109,7 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
1010 1109
1011 dev_net_set(dev, net); 1110 dev_net_set(dev, net);
1012 dev->rtnl_link_ops = ops; 1111 dev->rtnl_link_ops = ops;
1112 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
1013 dev->real_num_tx_queues = real_num_queues; 1113 dev->real_num_tx_queues = real_num_queues;
1014 1114
1015 if (strchr(dev->name, '%')) { 1115 if (strchr(dev->name, '%')) {
@@ -1139,7 +1239,7 @@ replay:
1139 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) 1239 if (!(nlh->nlmsg_flags & NLM_F_CREATE))
1140 return -ENODEV; 1240 return -ENODEV;
1141 1241
1142 if (ifm->ifi_index || ifm->ifi_flags || ifm->ifi_change) 1242 if (ifm->ifi_index)
1143 return -EOPNOTSUPP; 1243 return -EOPNOTSUPP;
1144 if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO]) 1244 if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO])
1145 return -EOPNOTSUPP; 1245 return -EOPNOTSUPP;
@@ -1170,9 +1270,15 @@ replay:
1170 err = ops->newlink(net, dev, tb, data); 1270 err = ops->newlink(net, dev, tb, data);
1171 else 1271 else
1172 err = register_netdevice(dev); 1272 err = register_netdevice(dev);
1173 if (err < 0 && !IS_ERR(dev)) 1273 if (err < 0 && !IS_ERR(dev)) {
1174 free_netdev(dev); 1274 free_netdev(dev);
1275 goto out;
1276 }
1175 1277
1278 err = rtnl_configure_link(dev, ifm);
1279 if (err < 0)
1280 unregister_netdevice(dev);
1281out:
1176 put_net(dest_net); 1282 put_net(dest_net);
1177 return err; 1283 return err;
1178 } 1284 }
@@ -1361,18 +1467,15 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
1361 struct net_device *dev = ptr; 1467 struct net_device *dev = ptr;
1362 1468
1363 switch (event) { 1469 switch (event) {
1364 case NETDEV_UNREGISTER:
1365 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
1366 break;
1367 case NETDEV_REGISTER:
1368 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
1369 break;
1370 case NETDEV_UP: 1470 case NETDEV_UP:
1371 case NETDEV_DOWN: 1471 case NETDEV_DOWN:
1372 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); 1472 case NETDEV_PRE_UP:
1373 break; 1473 case NETDEV_POST_INIT:
1474 case NETDEV_REGISTER:
1374 case NETDEV_CHANGE: 1475 case NETDEV_CHANGE:
1375 case NETDEV_GOING_DOWN: 1476 case NETDEV_GOING_DOWN:
1477 case NETDEV_UNREGISTER:
1478 case NETDEV_UNREGISTER_BATCH:
1376 break; 1479 break;
1377 default: 1480 default:
1378 rtmsg_ifinfo(RTM_NEWLINK, dev, 0); 1481 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
@@ -1386,7 +1489,7 @@ static struct notifier_block rtnetlink_dev_notifier = {
1386}; 1489};
1387 1490
1388 1491
1389static int rtnetlink_net_init(struct net *net) 1492static int __net_init rtnetlink_net_init(struct net *net)
1390{ 1493{
1391 struct sock *sk; 1494 struct sock *sk;
1392 sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX, 1495 sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX,
@@ -1397,7 +1500,7 @@ static int rtnetlink_net_init(struct net *net)
1397 return 0; 1500 return 0;
1398} 1501}
1399 1502
1400static void rtnetlink_net_exit(struct net *net) 1503static void __net_exit rtnetlink_net_exit(struct net *net)
1401{ 1504{
1402 netlink_kernel_release(net->rtnl); 1505 netlink_kernel_release(net->rtnl);
1403 net->rtnl = NULL; 1506 net->rtnl = NULL;
diff --git a/net/core/scm.c b/net/core/scm.c
index b7ba91b074b3..9b264634acfd 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -156,6 +156,8 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
156 switch (cmsg->cmsg_type) 156 switch (cmsg->cmsg_type)
157 { 157 {
158 case SCM_RIGHTS: 158 case SCM_RIGHTS:
159 if (!sock->ops || sock->ops->family != PF_UNIX)
160 goto error;
159 err=scm_fp_copy(cmsg, &p->fp); 161 err=scm_fp_copy(cmsg, &p->fp);
160 if (err<0) 162 if (err<0)
161 goto error; 163 goto error;
diff --git a/net/core/sock.c b/net/core/sock.c
index 76ff58d43e26..c5812bbc2cc9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -340,8 +340,12 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
340 rc = sk_backlog_rcv(sk, skb); 340 rc = sk_backlog_rcv(sk, skb);
341 341
342 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); 342 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
343 } else 343 } else if (sk_add_backlog(sk, skb)) {
344 sk_add_backlog(sk, skb); 344 bh_unlock_sock(sk);
345 atomic_inc(&sk->sk_drops);
346 goto discard_and_relse;
347 }
348
345 bh_unlock_sock(sk); 349 bh_unlock_sock(sk);
346out: 350out:
347 sock_put(sk); 351 sock_put(sk);
@@ -741,7 +745,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
741 struct timeval tm; 745 struct timeval tm;
742 } v; 746 } v;
743 747
744 unsigned int lv = sizeof(int); 748 int lv = sizeof(int);
745 int len; 749 int len;
746 750
747 if (get_user(len, optlen)) 751 if (get_user(len, optlen))
@@ -1073,7 +1077,8 @@ static void __sk_free(struct sock *sk)
1073 if (sk->sk_destruct) 1077 if (sk->sk_destruct)
1074 sk->sk_destruct(sk); 1078 sk->sk_destruct(sk);
1075 1079
1076 filter = rcu_dereference(sk->sk_filter); 1080 filter = rcu_dereference_check(sk->sk_filter,
1081 atomic_read(&sk->sk_wmem_alloc) == 0);
1077 if (filter) { 1082 if (filter) {
1078 sk_filter_uncharge(sk, filter); 1083 sk_filter_uncharge(sk, filter);
1079 rcu_assign_pointer(sk->sk_filter, NULL); 1084 rcu_assign_pointer(sk->sk_filter, NULL);
@@ -1138,6 +1143,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
1138 sock_lock_init(newsk); 1143 sock_lock_init(newsk);
1139 bh_lock_sock(newsk); 1144 bh_lock_sock(newsk);
1140 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; 1145 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
1146 newsk->sk_backlog.len = 0;
1141 1147
1142 atomic_set(&newsk->sk_rmem_alloc, 0); 1148 atomic_set(&newsk->sk_rmem_alloc, 0);
1143 /* 1149 /*
@@ -1205,6 +1211,10 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
1205 1211
1206 if (newsk->sk_prot->sockets_allocated) 1212 if (newsk->sk_prot->sockets_allocated)
1207 percpu_counter_inc(newsk->sk_prot->sockets_allocated); 1213 percpu_counter_inc(newsk->sk_prot->sockets_allocated);
1214
1215 if (sock_flag(newsk, SOCK_TIMESTAMP) ||
1216 sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
1217 net_enable_timestamp();
1208 } 1218 }
1209out: 1219out:
1210 return newsk; 1220 return newsk;
@@ -1537,6 +1547,12 @@ static void __release_sock(struct sock *sk)
1537 1547
1538 bh_lock_sock(sk); 1548 bh_lock_sock(sk);
1539 } while ((skb = sk->sk_backlog.head) != NULL); 1549 } while ((skb = sk->sk_backlog.head) != NULL);
1550
1551 /*
1552 * Doing the zeroing here guarantee we can not loop forever
1553 * while a wild producer attempts to flood us.
1554 */
1555 sk->sk_backlog.len = 0;
1540} 1556}
1541 1557
1542/** 1558/**
@@ -1869,6 +1885,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
1869 sk->sk_allocation = GFP_KERNEL; 1885 sk->sk_allocation = GFP_KERNEL;
1870 sk->sk_rcvbuf = sysctl_rmem_default; 1886 sk->sk_rcvbuf = sysctl_rmem_default;
1871 sk->sk_sndbuf = sysctl_wmem_default; 1887 sk->sk_sndbuf = sysctl_wmem_default;
1888 sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
1872 sk->sk_state = TCP_CLOSE; 1889 sk->sk_state = TCP_CLOSE;
1873 sk_set_socket(sk, sock); 1890 sk_set_socket(sk, sock);
1874 1891
@@ -2136,13 +2153,13 @@ int sock_prot_inuse_get(struct net *net, struct proto *prot)
2136} 2153}
2137EXPORT_SYMBOL_GPL(sock_prot_inuse_get); 2154EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2138 2155
2139static int sock_inuse_init_net(struct net *net) 2156static int __net_init sock_inuse_init_net(struct net *net)
2140{ 2157{
2141 net->core.inuse = alloc_percpu(struct prot_inuse); 2158 net->core.inuse = alloc_percpu(struct prot_inuse);
2142 return net->core.inuse ? 0 : -ENOMEM; 2159 return net->core.inuse ? 0 : -ENOMEM;
2143} 2160}
2144 2161
2145static void sock_inuse_exit_net(struct net *net) 2162static void __net_exit sock_inuse_exit_net(struct net *net)
2146{ 2163{
2147 free_percpu(net->core.inuse); 2164 free_percpu(net->core.inuse);
2148} 2165}
@@ -2224,13 +2241,10 @@ int proto_register(struct proto *prot, int alloc_slab)
2224 } 2241 }
2225 2242
2226 if (prot->rsk_prot != NULL) { 2243 if (prot->rsk_prot != NULL) {
2227 static const char mask[] = "request_sock_%s"; 2244 prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
2228
2229 prot->rsk_prot->slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
2230 if (prot->rsk_prot->slab_name == NULL) 2245 if (prot->rsk_prot->slab_name == NULL)
2231 goto out_free_sock_slab; 2246 goto out_free_sock_slab;
2232 2247
2233 sprintf(prot->rsk_prot->slab_name, mask, prot->name);
2234 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name, 2248 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
2235 prot->rsk_prot->obj_size, 0, 2249 prot->rsk_prot->obj_size, 0,
2236 SLAB_HWCACHE_ALIGN, NULL); 2250 SLAB_HWCACHE_ALIGN, NULL);
@@ -2243,14 +2257,11 @@ int proto_register(struct proto *prot, int alloc_slab)
2243 } 2257 }
2244 2258
2245 if (prot->twsk_prot != NULL) { 2259 if (prot->twsk_prot != NULL) {
2246 static const char mask[] = "tw_sock_%s"; 2260 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2247
2248 prot->twsk_prot->twsk_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
2249 2261
2250 if (prot->twsk_prot->twsk_slab_name == NULL) 2262 if (prot->twsk_prot->twsk_slab_name == NULL)
2251 goto out_free_request_sock_slab; 2263 goto out_free_request_sock_slab;
2252 2264
2253 sprintf(prot->twsk_prot->twsk_slab_name, mask, prot->name);
2254 prot->twsk_prot->twsk_slab = 2265 prot->twsk_prot->twsk_slab =
2255 kmem_cache_create(prot->twsk_prot->twsk_slab_name, 2266 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2256 prot->twsk_prot->twsk_obj_size, 2267 prot->twsk_prot->twsk_obj_size,
@@ -2277,7 +2288,8 @@ out_free_request_sock_slab:
2277 prot->rsk_prot->slab = NULL; 2288 prot->rsk_prot->slab = NULL;
2278 } 2289 }
2279out_free_request_sock_slab_name: 2290out_free_request_sock_slab_name:
2280 kfree(prot->rsk_prot->slab_name); 2291 if (prot->rsk_prot)
2292 kfree(prot->rsk_prot->slab_name);
2281out_free_sock_slab: 2293out_free_sock_slab:
2282 kmem_cache_destroy(prot->slab); 2294 kmem_cache_destroy(prot->slab);
2283 prot->slab = NULL; 2295 prot->slab = NULL;
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index db9f5b39388f..813e399220a7 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -54,7 +54,7 @@ MODULE_LICENSE("GPL");
54/**************** DCB attribute policies *************************************/ 54/**************** DCB attribute policies *************************************/
55 55
56/* DCB netlink attributes policy */ 56/* DCB netlink attributes policy */
57static struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = { 57static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
58 [DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1}, 58 [DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
59 [DCB_ATTR_STATE] = {.type = NLA_U8}, 59 [DCB_ATTR_STATE] = {.type = NLA_U8},
60 [DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED}, 60 [DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
@@ -68,7 +68,7 @@ static struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
68}; 68};
69 69
70/* DCB priority flow control to User Priority nested attributes */ 70/* DCB priority flow control to User Priority nested attributes */
71static struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = { 71static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
72 [DCB_PFC_UP_ATTR_0] = {.type = NLA_U8}, 72 [DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
73 [DCB_PFC_UP_ATTR_1] = {.type = NLA_U8}, 73 [DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
74 [DCB_PFC_UP_ATTR_2] = {.type = NLA_U8}, 74 [DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
@@ -81,7 +81,7 @@ static struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
81}; 81};
82 82
83/* DCB priority grouping nested attributes */ 83/* DCB priority grouping nested attributes */
84static struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = { 84static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
85 [DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED}, 85 [DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
86 [DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED}, 86 [DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
87 [DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED}, 87 [DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
@@ -103,7 +103,7 @@ static struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
103}; 103};
104 104
105/* DCB traffic class nested attributes. */ 105/* DCB traffic class nested attributes. */
106static struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = { 106static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
107 [DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8}, 107 [DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
108 [DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8}, 108 [DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
109 [DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8}, 109 [DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
@@ -112,7 +112,7 @@ static struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
112}; 112};
113 113
114/* DCB capabilities nested attributes. */ 114/* DCB capabilities nested attributes. */
115static struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = { 115static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
116 [DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG}, 116 [DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
117 [DCB_CAP_ATTR_PG] = {.type = NLA_U8}, 117 [DCB_CAP_ATTR_PG] = {.type = NLA_U8},
118 [DCB_CAP_ATTR_PFC] = {.type = NLA_U8}, 118 [DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
@@ -124,14 +124,14 @@ static struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
124}; 124};
125 125
126/* DCB capabilities nested attributes. */ 126/* DCB capabilities nested attributes. */
127static struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = { 127static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
128 [DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG}, 128 [DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
129 [DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8}, 129 [DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
130 [DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8}, 130 [DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
131}; 131};
132 132
133/* DCB BCN nested attributes. */ 133/* DCB BCN nested attributes. */
134static struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = { 134static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
135 [DCB_BCN_ATTR_RP_0] = {.type = NLA_U8}, 135 [DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
136 [DCB_BCN_ATTR_RP_1] = {.type = NLA_U8}, 136 [DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
137 [DCB_BCN_ATTR_RP_2] = {.type = NLA_U8}, 137 [DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
@@ -160,7 +160,7 @@ static struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
160}; 160};
161 161
162/* DCB APP nested attributes. */ 162/* DCB APP nested attributes. */
163static struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = { 163static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
164 [DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8}, 164 [DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
165 [DCB_APP_ATTR_ID] = {.type = NLA_U16}, 165 [DCB_APP_ATTR_ID] = {.type = NLA_U16},
166 [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8}, 166 [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index f3e9ba1cfd01..49d27c556bec 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -63,48 +63,37 @@ int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
63 u8 *ccid_array, array_len; 63 u8 *ccid_array, array_len;
64 int err = 0; 64 int err = 0;
65 65
66 if (len < ARRAY_SIZE(ccids))
67 return -EINVAL;
68
69 if (ccid_get_builtin_ccids(&ccid_array, &array_len)) 66 if (ccid_get_builtin_ccids(&ccid_array, &array_len))
70 return -ENOBUFS; 67 return -ENOBUFS;
71 68
72 if (put_user(array_len, optlen) || 69 if (put_user(array_len, optlen))
73 copy_to_user(optval, ccid_array, array_len)) 70 err = -EFAULT;
71 else if (len > 0 && copy_to_user(optval, ccid_array,
72 len > array_len ? array_len : len))
74 err = -EFAULT; 73 err = -EFAULT;
75 74
76 kfree(ccid_array); 75 kfree(ccid_array);
77 return err; 76 return err;
78} 77}
79 78
80static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...) 79static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_fmt, const char *fmt,...)
81{ 80{
82 struct kmem_cache *slab; 81 struct kmem_cache *slab;
83 char slab_name_fmt[32], *slab_name;
84 va_list args; 82 va_list args;
85 83
86 va_start(args, fmt); 84 va_start(args, fmt);
87 vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args); 85 vsnprintf(slab_name_fmt, CCID_SLAB_NAME_LENGTH, fmt, args);
88 va_end(args); 86 va_end(args);
89 87
90 slab_name = kstrdup(slab_name_fmt, GFP_KERNEL); 88 slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,
91 if (slab_name == NULL)
92 return NULL;
93 slab = kmem_cache_create(slab_name, sizeof(struct ccid) + obj_size, 0,
94 SLAB_HWCACHE_ALIGN, NULL); 89 SLAB_HWCACHE_ALIGN, NULL);
95 if (slab == NULL)
96 kfree(slab_name);
97 return slab; 90 return slab;
98} 91}
99 92
100static void ccid_kmem_cache_destroy(struct kmem_cache *slab) 93static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
101{ 94{
102 if (slab != NULL) { 95 if (slab != NULL)
103 const char *name = kmem_cache_name(slab);
104
105 kmem_cache_destroy(slab); 96 kmem_cache_destroy(slab);
106 kfree(name);
107 }
108} 97}
109 98
110static int ccid_activate(struct ccid_operations *ccid_ops) 99static int ccid_activate(struct ccid_operations *ccid_ops)
@@ -113,6 +102,7 @@ static int ccid_activate(struct ccid_operations *ccid_ops)
113 102
114 ccid_ops->ccid_hc_rx_slab = 103 ccid_ops->ccid_hc_rx_slab =
115 ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size, 104 ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size,
105 ccid_ops->ccid_hc_rx_slab_name,
116 "ccid%u_hc_rx_sock", 106 "ccid%u_hc_rx_sock",
117 ccid_ops->ccid_id); 107 ccid_ops->ccid_id);
118 if (ccid_ops->ccid_hc_rx_slab == NULL) 108 if (ccid_ops->ccid_hc_rx_slab == NULL)
@@ -120,6 +110,7 @@ static int ccid_activate(struct ccid_operations *ccid_ops)
120 110
121 ccid_ops->ccid_hc_tx_slab = 111 ccid_ops->ccid_hc_tx_slab =
122 ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size, 112 ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size,
113 ccid_ops->ccid_hc_tx_slab_name,
123 "ccid%u_hc_tx_sock", 114 "ccid%u_hc_tx_sock",
124 ccid_ops->ccid_id); 115 ccid_ops->ccid_id);
125 if (ccid_ops->ccid_hc_tx_slab == NULL) 116 if (ccid_ops->ccid_hc_tx_slab == NULL)
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index facedd20b531..6df6f8ac9636 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -19,7 +19,9 @@
19#include <linux/list.h> 19#include <linux/list.h>
20#include <linux/module.h> 20#include <linux/module.h>
21 21
22#define CCID_MAX 255 22/* maximum value for a CCID (RFC 4340, 19.5) */
23#define CCID_MAX 255
24#define CCID_SLAB_NAME_LENGTH 32
23 25
24struct tcp_info; 26struct tcp_info;
25 27
@@ -49,6 +51,8 @@ struct ccid_operations {
49 const char *ccid_name; 51 const char *ccid_name;
50 struct kmem_cache *ccid_hc_rx_slab, 52 struct kmem_cache *ccid_hc_rx_slab,
51 *ccid_hc_tx_slab; 53 *ccid_hc_tx_slab;
54 char ccid_hc_rx_slab_name[CCID_SLAB_NAME_LENGTH];
55 char ccid_hc_tx_slab_name[CCID_SLAB_NAME_LENGTH];
52 __u32 ccid_hc_rx_obj_size, 56 __u32 ccid_hc_rx_obj_size,
53 ccid_hc_tx_obj_size; 57 ccid_hc_tx_obj_size;
54 /* Interface Routines */ 58 /* Interface Routines */
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index dad7bc4878e0..4071eaf2b361 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -996,16 +996,16 @@ static struct inet_protosw dccp_v4_protosw = {
996 .flags = INET_PROTOSW_ICSK, 996 .flags = INET_PROTOSW_ICSK,
997}; 997};
998 998
999static int dccp_v4_init_net(struct net *net) 999static int __net_init dccp_v4_init_net(struct net *net)
1000{ 1000{
1001 int err; 1001 if (dccp_hashinfo.bhash == NULL)
1002 return -ESOCKTNOSUPPORT;
1002 1003
1003 err = inet_ctl_sock_create(&net->dccp.v4_ctl_sk, PF_INET, 1004 return inet_ctl_sock_create(&net->dccp.v4_ctl_sk, PF_INET,
1004 SOCK_DCCP, IPPROTO_DCCP, net); 1005 SOCK_DCCP, IPPROTO_DCCP, net);
1005 return err;
1006} 1006}
1007 1007
1008static void dccp_v4_exit_net(struct net *net) 1008static void __net_exit dccp_v4_exit_net(struct net *net)
1009{ 1009{
1010 inet_ctl_sock_destroy(net->dccp.v4_ctl_sk); 1010 inet_ctl_sock_destroy(net->dccp.v4_ctl_sk);
1011} 1011}
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index baf05cf43c28..af3394df63b7 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -1189,16 +1189,16 @@ static struct inet_protosw dccp_v6_protosw = {
1189 .flags = INET_PROTOSW_ICSK, 1189 .flags = INET_PROTOSW_ICSK,
1190}; 1190};
1191 1191
1192static int dccp_v6_init_net(struct net *net) 1192static int __net_init dccp_v6_init_net(struct net *net)
1193{ 1193{
1194 int err; 1194 if (dccp_hashinfo.bhash == NULL)
1195 return -ESOCKTNOSUPPORT;
1195 1196
1196 err = inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6, 1197 return inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
1197 SOCK_DCCP, IPPROTO_DCCP, net); 1198 SOCK_DCCP, IPPROTO_DCCP, net);
1198 return err;
1199} 1199}
1200 1200
1201static void dccp_v6_exit_net(struct net *net) 1201static void __net_exit dccp_v6_exit_net(struct net *net)
1202{ 1202{
1203 inet_ctl_sock_destroy(net->dccp.v6_ctl_sk); 1203 inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
1204} 1204}
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index af226a063141..0d508c359fa9 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -254,7 +254,7 @@ int dccp_child_process(struct sock *parent, struct sock *child,
254 * in main socket hash table and lock on listening 254 * in main socket hash table and lock on listening
255 * socket does not protect us more. 255 * socket does not protect us more.
256 */ 256 */
257 sk_add_backlog(child, skb); 257 __sk_add_backlog(child, skb);
258 } 258 }
259 259
260 bh_unlock_sock(child); 260 bh_unlock_sock(child);
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index dc328425fa20..f5b3464f1242 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -43,7 +43,7 @@ static int bufsize = 64 * 1024;
43static const char procname[] = "dccpprobe"; 43static const char procname[] = "dccpprobe";
44 44
45static struct { 45static struct {
46 struct kfifo *fifo; 46 struct kfifo fifo;
47 spinlock_t lock; 47 spinlock_t lock;
48 wait_queue_head_t wait; 48 wait_queue_head_t wait;
49 struct timespec tstart; 49 struct timespec tstart;
@@ -67,7 +67,7 @@ static void printl(const char *fmt, ...)
67 len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args); 67 len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args);
68 va_end(args); 68 va_end(args);
69 69
70 kfifo_put(dccpw.fifo, tbuf, len); 70 kfifo_in_locked(&dccpw.fifo, tbuf, len, &dccpw.lock);
71 wake_up(&dccpw.wait); 71 wake_up(&dccpw.wait);
72} 72}
73 73
@@ -109,7 +109,7 @@ static struct jprobe dccp_send_probe = {
109 109
110static int dccpprobe_open(struct inode *inode, struct file *file) 110static int dccpprobe_open(struct inode *inode, struct file *file)
111{ 111{
112 kfifo_reset(dccpw.fifo); 112 kfifo_reset(&dccpw.fifo);
113 getnstimeofday(&dccpw.tstart); 113 getnstimeofday(&dccpw.tstart);
114 return 0; 114 return 0;
115} 115}
@@ -131,11 +131,11 @@ static ssize_t dccpprobe_read(struct file *file, char __user *buf,
131 return -ENOMEM; 131 return -ENOMEM;
132 132
133 error = wait_event_interruptible(dccpw.wait, 133 error = wait_event_interruptible(dccpw.wait,
134 __kfifo_len(dccpw.fifo) != 0); 134 kfifo_len(&dccpw.fifo) != 0);
135 if (error) 135 if (error)
136 goto out_free; 136 goto out_free;
137 137
138 cnt = kfifo_get(dccpw.fifo, tbuf, len); 138 cnt = kfifo_out_locked(&dccpw.fifo, tbuf, len, &dccpw.lock);
139 error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0; 139 error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0;
140 140
141out_free: 141out_free:
@@ -156,14 +156,13 @@ static __init int dccpprobe_init(void)
156 156
157 init_waitqueue_head(&dccpw.wait); 157 init_waitqueue_head(&dccpw.wait);
158 spin_lock_init(&dccpw.lock); 158 spin_lock_init(&dccpw.lock);
159 dccpw.fifo = kfifo_alloc(bufsize, GFP_KERNEL, &dccpw.lock); 159 if (kfifo_alloc(&dccpw.fifo, bufsize, GFP_KERNEL))
160 if (IS_ERR(dccpw.fifo)) 160 return ret;
161 return PTR_ERR(dccpw.fifo);
162
163 if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops)) 161 if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
164 goto err0; 162 goto err0;
165 163
166 ret = register_jprobe(&dccp_send_probe); 164 try_then_request_module((ret = register_jprobe(&dccp_send_probe)) == 0,
165 "dccp");
167 if (ret) 166 if (ret)
168 goto err1; 167 goto err1;
169 168
@@ -172,14 +171,14 @@ static __init int dccpprobe_init(void)
172err1: 171err1:
173 proc_net_remove(&init_net, procname); 172 proc_net_remove(&init_net, procname);
174err0: 173err0:
175 kfifo_free(dccpw.fifo); 174 kfifo_free(&dccpw.fifo);
176 return ret; 175 return ret;
177} 176}
178module_init(dccpprobe_init); 177module_init(dccpprobe_init);
179 178
180static __exit void dccpprobe_exit(void) 179static __exit void dccpprobe_exit(void)
181{ 180{
182 kfifo_free(dccpw.fifo); 181 kfifo_free(&dccpw.fifo);
183 proc_net_remove(&init_net, procname); 182 proc_net_remove(&init_net, procname);
184 unregister_jprobe(&dccp_send_probe); 183 unregister_jprobe(&dccp_send_probe);
185 184
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 671cd1413d59..aa4cef374fd0 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -835,6 +835,8 @@ verify_sock_status:
835 len = -EFAULT; 835 len = -EFAULT;
836 break; 836 break;
837 } 837 }
838 if (flags & MSG_TRUNC)
839 len = skb->len;
838 found_fin_ok: 840 found_fin_ok:
839 if (!(flags & MSG_PEEK)) 841 if (!(flags & MSG_PEEK))
840 sk_eat_skb(sk, skb, 0); 842 sk_eat_skb(sk, skb, 0);
@@ -1003,12 +1005,13 @@ EXPORT_SYMBOL_GPL(dccp_shutdown);
1003 1005
1004static inline int dccp_mib_init(void) 1006static inline int dccp_mib_init(void)
1005{ 1007{
1006 return snmp_mib_init((void**)dccp_statistics, sizeof(struct dccp_mib)); 1008 return snmp_mib_init((void __percpu **)dccp_statistics,
1009 sizeof(struct dccp_mib));
1007} 1010}
1008 1011
1009static inline void dccp_mib_exit(void) 1012static inline void dccp_mib_exit(void)
1010{ 1013{
1011 snmp_mib_free((void**)dccp_statistics); 1014 snmp_mib_free((void __percpu **)dccp_statistics);
1012} 1015}
1013 1016
1014static int thash_entries; 1017static int thash_entries;
@@ -1033,7 +1036,7 @@ static int __init dccp_init(void)
1033 FIELD_SIZEOF(struct sk_buff, cb)); 1036 FIELD_SIZEOF(struct sk_buff, cb));
1034 rc = percpu_counter_init(&dccp_orphan_count, 0); 1037 rc = percpu_counter_init(&dccp_orphan_count, 0);
1035 if (rc) 1038 if (rc)
1036 goto out; 1039 goto out_fail;
1037 rc = -ENOBUFS; 1040 rc = -ENOBUFS;
1038 inet_hashinfo_init(&dccp_hashinfo); 1041 inet_hashinfo_init(&dccp_hashinfo);
1039 dccp_hashinfo.bind_bucket_cachep = 1042 dccp_hashinfo.bind_bucket_cachep =
@@ -1122,8 +1125,9 @@ static int __init dccp_init(void)
1122 goto out_sysctl_exit; 1125 goto out_sysctl_exit;
1123 1126
1124 dccp_timestamping_init(); 1127 dccp_timestamping_init();
1125out: 1128
1126 return rc; 1129 return 0;
1130
1127out_sysctl_exit: 1131out_sysctl_exit:
1128 dccp_sysctl_exit(); 1132 dccp_sysctl_exit();
1129out_ackvec_exit: 1133out_ackvec_exit:
@@ -1132,18 +1136,19 @@ out_free_dccp_mib:
1132 dccp_mib_exit(); 1136 dccp_mib_exit();
1133out_free_dccp_bhash: 1137out_free_dccp_bhash:
1134 free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order); 1138 free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
1135 dccp_hashinfo.bhash = NULL;
1136out_free_dccp_locks: 1139out_free_dccp_locks:
1137 inet_ehash_locks_free(&dccp_hashinfo); 1140 inet_ehash_locks_free(&dccp_hashinfo);
1138out_free_dccp_ehash: 1141out_free_dccp_ehash:
1139 free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order); 1142 free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
1140 dccp_hashinfo.ehash = NULL;
1141out_free_bind_bucket_cachep: 1143out_free_bind_bucket_cachep:
1142 kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); 1144 kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
1143 dccp_hashinfo.bind_bucket_cachep = NULL;
1144out_free_percpu: 1145out_free_percpu:
1145 percpu_counter_destroy(&dccp_orphan_count); 1146 percpu_counter_destroy(&dccp_orphan_count);
1146 goto out; 1147out_fail:
1148 dccp_hashinfo.bhash = NULL;
1149 dccp_hashinfo.ehash = NULL;
1150 dccp_hashinfo.bind_bucket_cachep = NULL;
1151 return rc;
1147} 1152}
1148 1153
1149static void __exit dccp_fini(void) 1154static void __exit dccp_fini(void)
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index a03284061a31..a7bf03ca0a36 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1155,8 +1155,8 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
1155 1155
1156 if (!(flags & MSG_TRYHARD)) { 1156 if (!(flags & MSG_TRYHARD)) {
1157 rcu_read_lock_bh(); 1157 rcu_read_lock_bh();
1158 for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt; 1158 for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
1159 rt = rcu_dereference(rt->u.dst.dn_next)) { 1159 rt = rcu_dereference_bh(rt->u.dst.dn_next)) {
1160 if ((flp->fld_dst == rt->fl.fld_dst) && 1160 if ((flp->fld_dst == rt->fl.fld_dst) &&
1161 (flp->fld_src == rt->fl.fld_src) && 1161 (flp->fld_src == rt->fl.fld_src) &&
1162 (flp->mark == rt->fl.mark) && 1162 (flp->mark == rt->fl.mark) &&
@@ -1618,9 +1618,9 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
1618 if (h > s_h) 1618 if (h > s_h)
1619 s_idx = 0; 1619 s_idx = 0;
1620 rcu_read_lock_bh(); 1620 rcu_read_lock_bh();
1621 for(rt = rcu_dereference(dn_rt_hash_table[h].chain), idx = 0; 1621 for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
1622 rt; 1622 rt;
1623 rt = rcu_dereference(rt->u.dst.dn_next), idx++) { 1623 rt = rcu_dereference_bh(rt->u.dst.dn_next), idx++) {
1624 if (idx < s_idx) 1624 if (idx < s_idx)
1625 continue; 1625 continue;
1626 skb_dst_set(skb, dst_clone(&rt->u.dst)); 1626 skb_dst_set(skb, dst_clone(&rt->u.dst));
@@ -1654,12 +1654,12 @@ static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
1654 1654
1655 for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) { 1655 for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
1656 rcu_read_lock_bh(); 1656 rcu_read_lock_bh();
1657 rt = dn_rt_hash_table[s->bucket].chain; 1657 rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
1658 if (rt) 1658 if (rt)
1659 break; 1659 break;
1660 rcu_read_unlock_bh(); 1660 rcu_read_unlock_bh();
1661 } 1661 }
1662 return rcu_dereference(rt); 1662 return rt;
1663} 1663}
1664 1664
1665static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt) 1665static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
@@ -1674,7 +1674,7 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
1674 rcu_read_lock_bh(); 1674 rcu_read_lock_bh();
1675 rt = dn_rt_hash_table[s->bucket].chain; 1675 rt = dn_rt_hash_table[s->bucket].chain;
1676 } 1676 }
1677 return rcu_dereference(rt); 1677 return rcu_dereference_bh(rt);
1678} 1678}
1679 1679
1680static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos) 1680static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index dd3db88f8f0a..205a1c12f3c0 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -73,8 +73,8 @@ __setup("ether=", netdev_boot_setup);
73 * @len: packet length (<= skb->len) 73 * @len: packet length (<= skb->len)
74 * 74 *
75 * 75 *
76 * Set the protocol type. For a packet of type ETH_P_802_3 we put the length 76 * Set the protocol type. For a packet of type ETH_P_802_3/2 we put the length
77 * in here instead. It is up to the 802.2 layer to carry protocol information. 77 * in here instead.
78 */ 78 */
79int eth_header(struct sk_buff *skb, struct net_device *dev, 79int eth_header(struct sk_buff *skb, struct net_device *dev,
80 unsigned short type, 80 unsigned short type,
@@ -82,7 +82,7 @@ int eth_header(struct sk_buff *skb, struct net_device *dev,
82{ 82{
83 struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN); 83 struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
84 84
85 if (type != ETH_P_802_3) 85 if (type != ETH_P_802_3 && type != ETH_P_802_2)
86 eth->h_proto = htons(type); 86 eth->h_proto = htons(type);
87 else 87 else
88 eth->h_proto = htons(len); 88 eth->h_proto = htons(len);
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 70491d9035eb..0c94a1ac2946 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -166,7 +166,7 @@ config IP_PNP_DHCP
166 166
167 If unsure, say Y. Note that if you want to use DHCP, a DHCP server 167 If unsure, say Y. Note that if you want to use DHCP, a DHCP server
168 must be operating on your network. Read 168 must be operating on your network. Read
169 <file:Documentation/filesystems/nfsroot.txt> for details. 169 <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
170 170
171config IP_PNP_BOOTP 171config IP_PNP_BOOTP
172 bool "IP: BOOTP support" 172 bool "IP: BOOTP support"
@@ -181,7 +181,7 @@ config IP_PNP_BOOTP
181 does BOOTP itself, providing all necessary information on the kernel 181 does BOOTP itself, providing all necessary information on the kernel
182 command line, you can say N here. If unsure, say Y. Note that if you 182 command line, you can say N here. If unsure, say Y. Note that if you
183 want to use BOOTP, a BOOTP server must be operating on your network. 183 want to use BOOTP, a BOOTP server must be operating on your network.
184 Read <file:Documentation/filesystems/nfsroot.txt> for details. 184 Read <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
185 185
186config IP_PNP_RARP 186config IP_PNP_RARP
187 bool "IP: RARP support" 187 bool "IP: RARP support"
@@ -194,7 +194,7 @@ config IP_PNP_RARP
194 older protocol which is being obsoleted by BOOTP and DHCP), say Y 194 older protocol which is being obsoleted by BOOTP and DHCP), say Y
195 here. Note that if you want to use RARP, a RARP server must be 195 here. Note that if you want to use RARP, a RARP server must be
196 operating on your network. Read 196 operating on your network. Read
197 <file:Documentation/filesystems/nfsroot.txt> for details. 197 <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
198 198
199# not yet ready.. 199# not yet ready..
200# bool ' IP: ARP support' CONFIG_IP_PNP_ARP 200# bool ' IP: ARP support' CONFIG_IP_PNP_ARP
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 7d12c6a9b19b..33b7dffa7732 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1385,7 +1385,7 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
1385} 1385}
1386EXPORT_SYMBOL_GPL(inet_ctl_sock_create); 1386EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
1387 1387
1388unsigned long snmp_fold_field(void *mib[], int offt) 1388unsigned long snmp_fold_field(void __percpu *mib[], int offt)
1389{ 1389{
1390 unsigned long res = 0; 1390 unsigned long res = 0;
1391 int i; 1391 int i;
@@ -1398,7 +1398,7 @@ unsigned long snmp_fold_field(void *mib[], int offt)
1398} 1398}
1399EXPORT_SYMBOL_GPL(snmp_fold_field); 1399EXPORT_SYMBOL_GPL(snmp_fold_field);
1400 1400
1401int snmp_mib_init(void *ptr[2], size_t mibsize) 1401int snmp_mib_init(void __percpu *ptr[2], size_t mibsize)
1402{ 1402{
1403 BUG_ON(ptr == NULL); 1403 BUG_ON(ptr == NULL);
1404 ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long)); 1404 ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long));
@@ -1416,7 +1416,7 @@ err0:
1416} 1416}
1417EXPORT_SYMBOL_GPL(snmp_mib_init); 1417EXPORT_SYMBOL_GPL(snmp_mib_init);
1418 1418
1419void snmp_mib_free(void *ptr[2]) 1419void snmp_mib_free(void __percpu *ptr[2])
1420{ 1420{
1421 BUG_ON(ptr == NULL); 1421 BUG_ON(ptr == NULL);
1422 free_percpu(ptr[0]); 1422 free_percpu(ptr[0]);
@@ -1460,25 +1460,25 @@ static const struct net_protocol icmp_protocol = {
1460 1460
1461static __net_init int ipv4_mib_init_net(struct net *net) 1461static __net_init int ipv4_mib_init_net(struct net *net)
1462{ 1462{
1463 if (snmp_mib_init((void **)net->mib.tcp_statistics, 1463 if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics,
1464 sizeof(struct tcp_mib)) < 0) 1464 sizeof(struct tcp_mib)) < 0)
1465 goto err_tcp_mib; 1465 goto err_tcp_mib;
1466 if (snmp_mib_init((void **)net->mib.ip_statistics, 1466 if (snmp_mib_init((void __percpu **)net->mib.ip_statistics,
1467 sizeof(struct ipstats_mib)) < 0) 1467 sizeof(struct ipstats_mib)) < 0)
1468 goto err_ip_mib; 1468 goto err_ip_mib;
1469 if (snmp_mib_init((void **)net->mib.net_statistics, 1469 if (snmp_mib_init((void __percpu **)net->mib.net_statistics,
1470 sizeof(struct linux_mib)) < 0) 1470 sizeof(struct linux_mib)) < 0)
1471 goto err_net_mib; 1471 goto err_net_mib;
1472 if (snmp_mib_init((void **)net->mib.udp_statistics, 1472 if (snmp_mib_init((void __percpu **)net->mib.udp_statistics,
1473 sizeof(struct udp_mib)) < 0) 1473 sizeof(struct udp_mib)) < 0)
1474 goto err_udp_mib; 1474 goto err_udp_mib;
1475 if (snmp_mib_init((void **)net->mib.udplite_statistics, 1475 if (snmp_mib_init((void __percpu **)net->mib.udplite_statistics,
1476 sizeof(struct udp_mib)) < 0) 1476 sizeof(struct udp_mib)) < 0)
1477 goto err_udplite_mib; 1477 goto err_udplite_mib;
1478 if (snmp_mib_init((void **)net->mib.icmp_statistics, 1478 if (snmp_mib_init((void __percpu **)net->mib.icmp_statistics,
1479 sizeof(struct icmp_mib)) < 0) 1479 sizeof(struct icmp_mib)) < 0)
1480 goto err_icmp_mib; 1480 goto err_icmp_mib;
1481 if (snmp_mib_init((void **)net->mib.icmpmsg_statistics, 1481 if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics,
1482 sizeof(struct icmpmsg_mib)) < 0) 1482 sizeof(struct icmpmsg_mib)) < 0)
1483 goto err_icmpmsg_mib; 1483 goto err_icmpmsg_mib;
1484 1484
@@ -1486,30 +1486,30 @@ static __net_init int ipv4_mib_init_net(struct net *net)
1486 return 0; 1486 return 0;
1487 1487
1488err_icmpmsg_mib: 1488err_icmpmsg_mib:
1489 snmp_mib_free((void **)net->mib.icmp_statistics); 1489 snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
1490err_icmp_mib: 1490err_icmp_mib:
1491 snmp_mib_free((void **)net->mib.udplite_statistics); 1491 snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
1492err_udplite_mib: 1492err_udplite_mib:
1493 snmp_mib_free((void **)net->mib.udp_statistics); 1493 snmp_mib_free((void __percpu **)net->mib.udp_statistics);
1494err_udp_mib: 1494err_udp_mib:
1495 snmp_mib_free((void **)net->mib.net_statistics); 1495 snmp_mib_free((void __percpu **)net->mib.net_statistics);
1496err_net_mib: 1496err_net_mib:
1497 snmp_mib_free((void **)net->mib.ip_statistics); 1497 snmp_mib_free((void __percpu **)net->mib.ip_statistics);
1498err_ip_mib: 1498err_ip_mib:
1499 snmp_mib_free((void **)net->mib.tcp_statistics); 1499 snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
1500err_tcp_mib: 1500err_tcp_mib:
1501 return -ENOMEM; 1501 return -ENOMEM;
1502} 1502}
1503 1503
1504static __net_exit void ipv4_mib_exit_net(struct net *net) 1504static __net_exit void ipv4_mib_exit_net(struct net *net)
1505{ 1505{
1506 snmp_mib_free((void **)net->mib.icmpmsg_statistics); 1506 snmp_mib_free((void __percpu **)net->mib.icmpmsg_statistics);
1507 snmp_mib_free((void **)net->mib.icmp_statistics); 1507 snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
1508 snmp_mib_free((void **)net->mib.udplite_statistics); 1508 snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
1509 snmp_mib_free((void **)net->mib.udp_statistics); 1509 snmp_mib_free((void __percpu **)net->mib.udp_statistics);
1510 snmp_mib_free((void **)net->mib.net_statistics); 1510 snmp_mib_free((void __percpu **)net->mib.net_statistics);
1511 snmp_mib_free((void **)net->mib.ip_statistics); 1511 snmp_mib_free((void __percpu **)net->mib.ip_statistics);
1512 snmp_mib_free((void **)net->mib.tcp_statistics); 1512 snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
1513} 1513}
1514 1514
1515static __net_initdata struct pernet_operations ipv4_mib_ops = { 1515static __net_initdata struct pernet_operations ipv4_mib_ops = {
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 7ed3e4ae93ae..987b47dc69ad 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -393,7 +393,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
393 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) 393 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
394 return; 394 return;
395 395
396 x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET); 396 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET);
397 if (!x) 397 if (!x)
398 return; 398 return;
399 printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n", 399 printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n",
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index c95cd93acf29..c4dd13542802 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -70,6 +70,7 @@
70 * bonding can change the skb before 70 * bonding can change the skb before
71 * sending (e.g. insert 8021q tag). 71 * sending (e.g. insert 8021q tag).
72 * Harald Welte : convert to make use of jenkins hash 72 * Harald Welte : convert to make use of jenkins hash
73 * Jesper D. Brouer: Proxy ARP PVLAN RFC 3069 support.
73 */ 74 */
74 75
75#include <linux/module.h> 76#include <linux/module.h>
@@ -524,12 +525,15 @@ int arp_bind_neighbour(struct dst_entry *dst)
524/* 525/*
525 * Check if we can use proxy ARP for this path 526 * Check if we can use proxy ARP for this path
526 */ 527 */
527 528static inline int arp_fwd_proxy(struct in_device *in_dev,
528static inline int arp_fwd_proxy(struct in_device *in_dev, struct rtable *rt) 529 struct net_device *dev, struct rtable *rt)
529{ 530{
530 struct in_device *out_dev; 531 struct in_device *out_dev;
531 int imi, omi = -1; 532 int imi, omi = -1;
532 533
534 if (rt->u.dst.dev == dev)
535 return 0;
536
533 if (!IN_DEV_PROXY_ARP(in_dev)) 537 if (!IN_DEV_PROXY_ARP(in_dev))
534 return 0; 538 return 0;
535 539
@@ -548,6 +552,43 @@ static inline int arp_fwd_proxy(struct in_device *in_dev, struct rtable *rt)
548} 552}
549 553
550/* 554/*
555 * Check for RFC3069 proxy arp private VLAN (allow to send back to same dev)
556 *
557 * RFC3069 supports proxy arp replies back to the same interface. This
558 * is done to support (ethernet) switch features, like RFC 3069, where
559 * the individual ports are not allowed to communicate with each
560 * other, BUT they are allowed to talk to the upstream router. As
561 * described in RFC 3069, it is possible to allow these hosts to
562 * communicate through the upstream router, by proxy_arp'ing.
563 *
564 * RFC 3069: "VLAN Aggregation for Efficient IP Address Allocation"
565 *
566 * This technology is known by different names:
567 * In RFC 3069 it is called VLAN Aggregation.
568 * Cisco and Allied Telesyn call it Private VLAN.
569 * Hewlett-Packard call it Source-Port filtering or port-isolation.
570 * Ericsson call it MAC-Forced Forwarding (RFC Draft).
571 *
572 */
573static inline int arp_fwd_pvlan(struct in_device *in_dev,
574 struct net_device *dev, struct rtable *rt,
575 __be32 sip, __be32 tip)
576{
577 /* Private VLAN is only concerned about the same ethernet segment */
578 if (rt->u.dst.dev != dev)
579 return 0;
580
581 /* Don't reply on self probes (often done by windowz boxes)*/
582 if (sip == tip)
583 return 0;
584
585 if (IN_DEV_PROXY_ARP_PVLAN(in_dev))
586 return 1;
587 else
588 return 0;
589}
590
591/*
551 * Interface to link layer: send routine and receive handler. 592 * Interface to link layer: send routine and receive handler.
552 */ 593 */
553 594
@@ -833,8 +874,11 @@ static int arp_process(struct sk_buff *skb)
833 } 874 }
834 goto out; 875 goto out;
835 } else if (IN_DEV_FORWARD(in_dev)) { 876 } else if (IN_DEV_FORWARD(in_dev)) {
836 if (addr_type == RTN_UNICAST && rt->u.dst.dev != dev && 877 if (addr_type == RTN_UNICAST &&
837 (arp_fwd_proxy(in_dev, rt) || pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) { 878 (arp_fwd_proxy(in_dev, dev, rt) ||
879 arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
880 pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))
881 {
838 n = neigh_event_ns(&arp_tbl, sha, &sip, dev); 882 n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
839 if (n) 883 if (n)
840 neigh_release(n); 884 neigh_release(n);
@@ -863,7 +907,8 @@ static int arp_process(struct sk_buff *skb)
863 devices (strip is candidate) 907 devices (strip is candidate)
864 */ 908 */
865 if (n == NULL && 909 if (n == NULL &&
866 arp->ar_op == htons(ARPOP_REPLY) && 910 (arp->ar_op == htons(ARPOP_REPLY) ||
911 (arp->ar_op == htons(ARPOP_REQUEST) && tip == sip)) &&
867 inet_addr_type(net, sip) == RTN_UNICAST) 912 inet_addr_type(net, sip) == RTN_UNICAST)
868 n = __neigh_lookup(&arp_tbl, &sip, dev, 1); 913 n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
869 } 914 }
@@ -1239,8 +1284,7 @@ void __init arp_init(void)
1239 dev_add_pack(&arp_packet_type); 1284 dev_add_pack(&arp_packet_type);
1240 arp_proc_init(); 1285 arp_proc_init();
1241#ifdef CONFIG_SYSCTL 1286#ifdef CONFIG_SYSCTL
1242 neigh_sysctl_register(NULL, &arp_tbl.parms, NET_IPV4, 1287 neigh_sysctl_register(NULL, &arp_tbl.parms, "ipv4", NULL);
1243 NET_IPV4_NEIGH, "ipv4", NULL);
1244#endif 1288#endif
1245 register_netdevice_notifier(&arp_netdev_notifier); 1289 register_netdevice_notifier(&arp_netdev_notifier);
1246} 1290}
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5cdbc102a418..3feb2b390308 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -64,20 +64,20 @@
64 64
65static struct ipv4_devconf ipv4_devconf = { 65static struct ipv4_devconf ipv4_devconf = {
66 .data = { 66 .data = {
67 [NET_IPV4_CONF_ACCEPT_REDIRECTS - 1] = 1, 67 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
68 [NET_IPV4_CONF_SEND_REDIRECTS - 1] = 1, 68 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
69 [NET_IPV4_CONF_SECURE_REDIRECTS - 1] = 1, 69 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
70 [NET_IPV4_CONF_SHARED_MEDIA - 1] = 1, 70 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
71 }, 71 },
72}; 72};
73 73
74static struct ipv4_devconf ipv4_devconf_dflt = { 74static struct ipv4_devconf ipv4_devconf_dflt = {
75 .data = { 75 .data = {
76 [NET_IPV4_CONF_ACCEPT_REDIRECTS - 1] = 1, 76 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
77 [NET_IPV4_CONF_SEND_REDIRECTS - 1] = 1, 77 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
78 [NET_IPV4_CONF_SECURE_REDIRECTS - 1] = 1, 78 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
79 [NET_IPV4_CONF_SHARED_MEDIA - 1] = 1, 79 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
80 [NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE - 1] = 1, 80 [IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
81 }, 81 },
82}; 82};
83 83
@@ -1194,7 +1194,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1194 hlist_for_each_entry_rcu(dev, node, head, index_hlist) { 1194 hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
1195 if (idx < s_idx) 1195 if (idx < s_idx)
1196 goto cont; 1196 goto cont;
1197 if (idx > s_idx) 1197 if (h > s_h || idx > s_idx)
1198 s_ip_idx = 0; 1198 s_ip_idx = 0;
1199 in_dev = __in_dev_get_rcu(dev); 1199 in_dev = __in_dev_get_rcu(dev);
1200 if (!in_dev) 1200 if (!in_dev)
@@ -1317,14 +1317,19 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write,
1317{ 1317{
1318 int *valp = ctl->data; 1318 int *valp = ctl->data;
1319 int val = *valp; 1319 int val = *valp;
1320 loff_t pos = *ppos;
1320 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 1321 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
1321 1322
1322 if (write && *valp != val) { 1323 if (write && *valp != val) {
1323 struct net *net = ctl->extra2; 1324 struct net *net = ctl->extra2;
1324 1325
1325 if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) { 1326 if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
1326 if (!rtnl_trylock()) 1327 if (!rtnl_trylock()) {
1328 /* Restore the original values before restarting */
1329 *valp = val;
1330 *ppos = pos;
1327 return restart_syscall(); 1331 return restart_syscall();
1332 }
1328 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) { 1333 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
1329 inet_forward_change(net); 1334 inet_forward_change(net);
1330 } else if (*valp) { 1335 } else if (*valp) {
@@ -1360,7 +1365,7 @@ int ipv4_doint_and_flush(ctl_table *ctl, int write,
1360 { \ 1365 { \
1361 .procname = name, \ 1366 .procname = name, \
1362 .data = ipv4_devconf.data + \ 1367 .data = ipv4_devconf.data + \
1363 NET_IPV4_CONF_ ## attr - 1, \ 1368 IPV4_DEVCONF_ ## attr - 1, \
1364 .maxlen = sizeof(int), \ 1369 .maxlen = sizeof(int), \
1365 .mode = mval, \ 1370 .mode = mval, \
1366 .proc_handler = proc, \ 1371 .proc_handler = proc, \
@@ -1381,7 +1386,7 @@ int ipv4_doint_and_flush(ctl_table *ctl, int write,
1381 1386
1382static struct devinet_sysctl_table { 1387static struct devinet_sysctl_table {
1383 struct ctl_table_header *sysctl_header; 1388 struct ctl_table_header *sysctl_header;
1384 struct ctl_table devinet_vars[__NET_IPV4_CONF_MAX]; 1389 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
1385 char *dev_name; 1390 char *dev_name;
1386} devinet_sysctl = { 1391} devinet_sysctl = {
1387 .devinet_vars = { 1392 .devinet_vars = {
@@ -1397,6 +1402,7 @@ static struct devinet_sysctl_table {
1397 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE, 1402 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
1398 "accept_source_route"), 1403 "accept_source_route"),
1399 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"), 1404 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
1405 DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
1400 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"), 1406 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
1401 DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"), 1407 DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
1402 DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"), 1408 DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
@@ -1407,6 +1413,7 @@ static struct devinet_sysctl_table {
1407 DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"), 1413 DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
1408 DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"), 1414 DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
1409 DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"), 1415 DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
1416 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
1410 1417
1411 DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"), 1418 DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
1412 DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"), 1419 DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
@@ -1485,8 +1492,7 @@ static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
1485 1492
1486static void devinet_sysctl_register(struct in_device *idev) 1493static void devinet_sysctl_register(struct in_device *idev)
1487{ 1494{
1488 neigh_sysctl_register(idev->dev, idev->arp_parms, NET_IPV4, 1495 neigh_sysctl_register(idev->dev, idev->arp_parms, "ipv4", NULL);
1489 NET_IPV4_NEIGH, "ipv4", NULL);
1490 __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name, 1496 __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
1491 &idev->cnf); 1497 &idev->cnf);
1492} 1498}
@@ -1501,7 +1507,7 @@ static struct ctl_table ctl_forward_entry[] = {
1501 { 1507 {
1502 .procname = "ip_forward", 1508 .procname = "ip_forward",
1503 .data = &ipv4_devconf.data[ 1509 .data = &ipv4_devconf.data[
1504 NET_IPV4_CONF_FORWARDING - 1], 1510 IPV4_DEVCONF_FORWARDING - 1],
1505 .maxlen = sizeof(int), 1511 .maxlen = sizeof(int),
1506 .mode = 0644, 1512 .mode = 0644,
1507 .proc_handler = devinet_sysctl_forward, 1513 .proc_handler = devinet_sysctl_forward,
@@ -1545,7 +1551,7 @@ static __net_init int devinet_init_net(struct net *net)
1545 if (tbl == NULL) 1551 if (tbl == NULL)
1546 goto err_alloc_ctl; 1552 goto err_alloc_ctl;
1547 1553
1548 tbl[0].data = &all->data[NET_IPV4_CONF_FORWARDING - 1]; 1554 tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
1549 tbl[0].extra1 = all; 1555 tbl[0].extra1 = all;
1550 tbl[0].extra2 = net; 1556 tbl[0].extra2 = net;
1551#endif 1557#endif
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 1948895beb6d..14ca1f1c3fb0 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -422,7 +422,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
422 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) 422 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
423 return; 423 return;
424 424
425 x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET); 425 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET);
426 if (!x) 426 if (!x)
427 return; 427 return;
428 NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n", 428 NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 3323168ee52d..9b3e28ed5240 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -252,6 +252,8 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
252 no_addr = in_dev->ifa_list == NULL; 252 no_addr = in_dev->ifa_list == NULL;
253 rpf = IN_DEV_RPFILTER(in_dev); 253 rpf = IN_DEV_RPFILTER(in_dev);
254 accept_local = IN_DEV_ACCEPT_LOCAL(in_dev); 254 accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
255 if (mark && !IN_DEV_SRC_VMARK(in_dev))
256 fl.mark = 0;
255 } 257 }
256 rcu_read_unlock(); 258 rcu_read_unlock();
257 259
@@ -881,7 +883,7 @@ static void nl_fib_input(struct sk_buff *skb)
881 netlink_unicast(net->ipv4.fibnl, skb, pid, MSG_DONTWAIT); 883 netlink_unicast(net->ipv4.fibnl, skb, pid, MSG_DONTWAIT);
882} 884}
883 885
884static int nl_fib_lookup_init(struct net *net) 886static int __net_init nl_fib_lookup_init(struct net *net)
885{ 887{
886 struct sock *sk; 888 struct sock *sk;
887 sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, 0, 889 sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, 0,
@@ -1002,7 +1004,7 @@ fail:
1002 return err; 1004 return err;
1003} 1005}
1004 1006
1005static void __net_exit ip_fib_net_exit(struct net *net) 1007static void ip_fib_net_exit(struct net *net)
1006{ 1008{
1007 unsigned int i; 1009 unsigned int i;
1008 1010
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index ed19aa6919c2..1af0ea0fb6a2 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -62,8 +62,8 @@ static DEFINE_SPINLOCK(fib_multipath_lock);
62#define for_nexthops(fi) { int nhsel; const struct fib_nh * nh; \ 62#define for_nexthops(fi) { int nhsel; const struct fib_nh * nh; \
63for (nhsel=0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++) 63for (nhsel=0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
64 64
65#define change_nexthops(fi) { int nhsel; struct fib_nh * nh; \ 65#define change_nexthops(fi) { int nhsel; struct fib_nh *nexthop_nh; \
66for (nhsel=0, nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++) 66for (nhsel=0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nexthop_nh++, nhsel++)
67 67
68#else /* CONFIG_IP_ROUTE_MULTIPATH */ 68#else /* CONFIG_IP_ROUTE_MULTIPATH */
69 69
@@ -72,7 +72,7 @@ for (nhsel=0, nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++,
72#define for_nexthops(fi) { int nhsel = 0; const struct fib_nh * nh = (fi)->fib_nh; \ 72#define for_nexthops(fi) { int nhsel = 0; const struct fib_nh * nh = (fi)->fib_nh; \
73for (nhsel=0; nhsel < 1; nhsel++) 73for (nhsel=0; nhsel < 1; nhsel++)
74 74
75#define change_nexthops(fi) { int nhsel = 0; struct fib_nh * nh = (struct fib_nh *)((fi)->fib_nh); \ 75#define change_nexthops(fi) { int nhsel = 0; struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
76for (nhsel=0; nhsel < 1; nhsel++) 76for (nhsel=0; nhsel < 1; nhsel++)
77 77
78#endif /* CONFIG_IP_ROUTE_MULTIPATH */ 78#endif /* CONFIG_IP_ROUTE_MULTIPATH */
@@ -145,9 +145,9 @@ void free_fib_info(struct fib_info *fi)
145 return; 145 return;
146 } 146 }
147 change_nexthops(fi) { 147 change_nexthops(fi) {
148 if (nh->nh_dev) 148 if (nexthop_nh->nh_dev)
149 dev_put(nh->nh_dev); 149 dev_put(nexthop_nh->nh_dev);
150 nh->nh_dev = NULL; 150 nexthop_nh->nh_dev = NULL;
151 } endfor_nexthops(fi); 151 } endfor_nexthops(fi);
152 fib_info_cnt--; 152 fib_info_cnt--;
153 release_net(fi->fib_net); 153 release_net(fi->fib_net);
@@ -162,9 +162,9 @@ void fib_release_info(struct fib_info *fi)
162 if (fi->fib_prefsrc) 162 if (fi->fib_prefsrc)
163 hlist_del(&fi->fib_lhash); 163 hlist_del(&fi->fib_lhash);
164 change_nexthops(fi) { 164 change_nexthops(fi) {
165 if (!nh->nh_dev) 165 if (!nexthop_nh->nh_dev)
166 continue; 166 continue;
167 hlist_del(&nh->nh_hash); 167 hlist_del(&nexthop_nh->nh_hash);
168 } endfor_nexthops(fi) 168 } endfor_nexthops(fi)
169 fi->fib_dead = 1; 169 fi->fib_dead = 1;
170 fib_info_put(fi); 170 fib_info_put(fi);
@@ -395,19 +395,20 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
395 if (!rtnh_ok(rtnh, remaining)) 395 if (!rtnh_ok(rtnh, remaining))
396 return -EINVAL; 396 return -EINVAL;
397 397
398 nh->nh_flags = (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags; 398 nexthop_nh->nh_flags =
399 nh->nh_oif = rtnh->rtnh_ifindex; 399 (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
400 nh->nh_weight = rtnh->rtnh_hops + 1; 400 nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
401 nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;
401 402
402 attrlen = rtnh_attrlen(rtnh); 403 attrlen = rtnh_attrlen(rtnh);
403 if (attrlen > 0) { 404 if (attrlen > 0) {
404 struct nlattr *nla, *attrs = rtnh_attrs(rtnh); 405 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
405 406
406 nla = nla_find(attrs, attrlen, RTA_GATEWAY); 407 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
407 nh->nh_gw = nla ? nla_get_be32(nla) : 0; 408 nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
408#ifdef CONFIG_NET_CLS_ROUTE 409#ifdef CONFIG_NET_CLS_ROUTE
409 nla = nla_find(attrs, attrlen, RTA_FLOW); 410 nla = nla_find(attrs, attrlen, RTA_FLOW);
410 nh->nh_tclassid = nla ? nla_get_u32(nla) : 0; 411 nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
411#endif 412#endif
412 } 413 }
413 414
@@ -527,10 +528,6 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
527 if (nh->nh_gw) { 528 if (nh->nh_gw) {
528 struct fib_result res; 529 struct fib_result res;
529 530
530#ifdef CONFIG_IP_ROUTE_PERVASIVE
531 if (nh->nh_flags&RTNH_F_PERVASIVE)
532 return 0;
533#endif
534 if (nh->nh_flags&RTNH_F_ONLINK) { 531 if (nh->nh_flags&RTNH_F_ONLINK) {
535 struct net_device *dev; 532 struct net_device *dev;
536 533
@@ -738,7 +735,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
738 735
739 fi->fib_nhs = nhs; 736 fi->fib_nhs = nhs;
740 change_nexthops(fi) { 737 change_nexthops(fi) {
741 nh->nh_parent = fi; 738 nexthop_nh->nh_parent = fi;
742 } endfor_nexthops(fi) 739 } endfor_nexthops(fi)
743 740
744 if (cfg->fc_mx) { 741 if (cfg->fc_mx) {
@@ -808,7 +805,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
808 goto failure; 805 goto failure;
809 } else { 806 } else {
810 change_nexthops(fi) { 807 change_nexthops(fi) {
811 if ((err = fib_check_nh(cfg, fi, nh)) != 0) 808 if ((err = fib_check_nh(cfg, fi, nexthop_nh)) != 0)
812 goto failure; 809 goto failure;
813 } endfor_nexthops(fi) 810 } endfor_nexthops(fi)
814 } 811 }
@@ -843,11 +840,11 @@ link_it:
843 struct hlist_head *head; 840 struct hlist_head *head;
844 unsigned int hash; 841 unsigned int hash;
845 842
846 if (!nh->nh_dev) 843 if (!nexthop_nh->nh_dev)
847 continue; 844 continue;
848 hash = fib_devindex_hashfn(nh->nh_dev->ifindex); 845 hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
849 head = &fib_info_devhash[hash]; 846 head = &fib_info_devhash[hash];
850 hlist_add_head(&nh->nh_hash, head); 847 hlist_add_head(&nexthop_nh->nh_hash, head);
851 } endfor_nexthops(fi) 848 } endfor_nexthops(fi)
852 spin_unlock_bh(&fib_info_lock); 849 spin_unlock_bh(&fib_info_lock);
853 return fi; 850 return fi;
@@ -1080,21 +1077,21 @@ int fib_sync_down_dev(struct net_device *dev, int force)
1080 prev_fi = fi; 1077 prev_fi = fi;
1081 dead = 0; 1078 dead = 0;
1082 change_nexthops(fi) { 1079 change_nexthops(fi) {
1083 if (nh->nh_flags&RTNH_F_DEAD) 1080 if (nexthop_nh->nh_flags&RTNH_F_DEAD)
1084 dead++; 1081 dead++;
1085 else if (nh->nh_dev == dev && 1082 else if (nexthop_nh->nh_dev == dev &&
1086 nh->nh_scope != scope) { 1083 nexthop_nh->nh_scope != scope) {
1087 nh->nh_flags |= RTNH_F_DEAD; 1084 nexthop_nh->nh_flags |= RTNH_F_DEAD;
1088#ifdef CONFIG_IP_ROUTE_MULTIPATH 1085#ifdef CONFIG_IP_ROUTE_MULTIPATH
1089 spin_lock_bh(&fib_multipath_lock); 1086 spin_lock_bh(&fib_multipath_lock);
1090 fi->fib_power -= nh->nh_power; 1087 fi->fib_power -= nexthop_nh->nh_power;
1091 nh->nh_power = 0; 1088 nexthop_nh->nh_power = 0;
1092 spin_unlock_bh(&fib_multipath_lock); 1089 spin_unlock_bh(&fib_multipath_lock);
1093#endif 1090#endif
1094 dead++; 1091 dead++;
1095 } 1092 }
1096#ifdef CONFIG_IP_ROUTE_MULTIPATH 1093#ifdef CONFIG_IP_ROUTE_MULTIPATH
1097 if (force > 1 && nh->nh_dev == dev) { 1094 if (force > 1 && nexthop_nh->nh_dev == dev) {
1098 dead = fi->fib_nhs; 1095 dead = fi->fib_nhs;
1099 break; 1096 break;
1100 } 1097 }
@@ -1144,18 +1141,20 @@ int fib_sync_up(struct net_device *dev)
1144 prev_fi = fi; 1141 prev_fi = fi;
1145 alive = 0; 1142 alive = 0;
1146 change_nexthops(fi) { 1143 change_nexthops(fi) {
1147 if (!(nh->nh_flags&RTNH_F_DEAD)) { 1144 if (!(nexthop_nh->nh_flags&RTNH_F_DEAD)) {
1148 alive++; 1145 alive++;
1149 continue; 1146 continue;
1150 } 1147 }
1151 if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP)) 1148 if (nexthop_nh->nh_dev == NULL ||
1149 !(nexthop_nh->nh_dev->flags&IFF_UP))
1152 continue; 1150 continue;
1153 if (nh->nh_dev != dev || !__in_dev_get_rtnl(dev)) 1151 if (nexthop_nh->nh_dev != dev ||
1152 !__in_dev_get_rtnl(dev))
1154 continue; 1153 continue;
1155 alive++; 1154 alive++;
1156 spin_lock_bh(&fib_multipath_lock); 1155 spin_lock_bh(&fib_multipath_lock);
1157 nh->nh_power = 0; 1156 nexthop_nh->nh_power = 0;
1158 nh->nh_flags &= ~RTNH_F_DEAD; 1157 nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
1159 spin_unlock_bh(&fib_multipath_lock); 1158 spin_unlock_bh(&fib_multipath_lock);
1160 } endfor_nexthops(fi) 1159 } endfor_nexthops(fi)
1161 1160
@@ -1182,9 +1181,9 @@ void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
1182 if (fi->fib_power <= 0) { 1181 if (fi->fib_power <= 0) {
1183 int power = 0; 1182 int power = 0;
1184 change_nexthops(fi) { 1183 change_nexthops(fi) {
1185 if (!(nh->nh_flags&RTNH_F_DEAD)) { 1184 if (!(nexthop_nh->nh_flags&RTNH_F_DEAD)) {
1186 power += nh->nh_weight; 1185 power += nexthop_nh->nh_weight;
1187 nh->nh_power = nh->nh_weight; 1186 nexthop_nh->nh_power = nexthop_nh->nh_weight;
1188 } 1187 }
1189 } endfor_nexthops(fi); 1188 } endfor_nexthops(fi);
1190 fi->fib_power = power; 1189 fi->fib_power = power;
@@ -1204,9 +1203,10 @@ void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
1204 w = jiffies % fi->fib_power; 1203 w = jiffies % fi->fib_power;
1205 1204
1206 change_nexthops(fi) { 1205 change_nexthops(fi) {
1207 if (!(nh->nh_flags&RTNH_F_DEAD) && nh->nh_power) { 1206 if (!(nexthop_nh->nh_flags&RTNH_F_DEAD) &&
1208 if ((w -= nh->nh_power) <= 0) { 1207 nexthop_nh->nh_power) {
1209 nh->nh_power--; 1208 if ((w -= nexthop_nh->nh_power) <= 0) {
1209 nexthop_nh->nh_power--;
1210 fi->fib_power--; 1210 fi->fib_power--;
1211 res->nh_sel = nhsel; 1211 res->nh_sel = nhsel;
1212 spin_unlock_bh(&fib_multipath_lock); 1212 spin_unlock_bh(&fib_multipath_lock);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index af5d89792860..01ef8ba9025c 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -961,7 +961,9 @@ fib_find_node(struct trie *t, u32 key)
961 struct node *n; 961 struct node *n;
962 962
963 pos = 0; 963 pos = 0;
964 n = rcu_dereference(t->trie); 964 n = rcu_dereference_check(t->trie,
965 rcu_read_lock_held() ||
966 lockdep_rtnl_is_held());
965 967
966 while (n != NULL && NODE_TYPE(n) == T_TNODE) { 968 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
967 tn = (struct tnode *) n; 969 tn = (struct tnode *) n;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index fe11f60ce41b..4b4c2bcd15db 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -114,7 +114,7 @@ struct icmp_bxm {
114/* An array of errno for error messages from dest unreach. */ 114/* An array of errno for error messages from dest unreach. */
115/* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */ 115/* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */
116 116
117struct icmp_err icmp_err_convert[] = { 117const struct icmp_err icmp_err_convert[] = {
118 { 118 {
119 .errno = ENETUNREACH, /* ICMP_NET_UNREACH */ 119 .errno = ENETUNREACH, /* ICMP_NET_UNREACH */
120 .fatal = 0, 120 .fatal = 0,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 76c08402c933..63bf298ca109 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -946,7 +946,6 @@ int igmp_rcv(struct sk_buff *skb)
946 break; 946 break;
947 case IGMP_HOST_MEMBERSHIP_REPORT: 947 case IGMP_HOST_MEMBERSHIP_REPORT:
948 case IGMPV2_HOST_MEMBERSHIP_REPORT: 948 case IGMPV2_HOST_MEMBERSHIP_REPORT:
949 case IGMPV3_HOST_MEMBERSHIP_REPORT:
950 /* Is it our report looped back? */ 949 /* Is it our report looped back? */
951 if (skb_rtable(skb)->fl.iif == 0) 950 if (skb_rtable(skb)->fl.iif == 0)
952 break; 951 break;
@@ -960,6 +959,7 @@ int igmp_rcv(struct sk_buff *skb)
960 in_dev_put(in_dev); 959 in_dev_put(in_dev);
961 return pim_rcv_v1(skb); 960 return pim_rcv_v1(skb);
962#endif 961#endif
962 case IGMPV3_HOST_MEMBERSHIP_REPORT:
963 case IGMP_DVMRP: 963 case IGMP_DVMRP:
964 case IGMP_TRACE: 964 case IGMP_TRACE:
965 case IGMP_HOST_LEAVE_MESSAGE: 965 case IGMP_HOST_LEAVE_MESSAGE:
@@ -1799,7 +1799,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
1799 iml->next = inet->mc_list; 1799 iml->next = inet->mc_list;
1800 iml->sflist = NULL; 1800 iml->sflist = NULL;
1801 iml->sfmode = MCAST_EXCLUDE; 1801 iml->sfmode = MCAST_EXCLUDE;
1802 inet->mc_list = iml; 1802 rcu_assign_pointer(inet->mc_list, iml);
1803 ip_mc_inc_group(in_dev, addr); 1803 ip_mc_inc_group(in_dev, addr);
1804 err = 0; 1804 err = 0;
1805done: 1805done:
@@ -1807,24 +1807,46 @@ done:
1807 return err; 1807 return err;
1808} 1808}
1809 1809
1810static void ip_sf_socklist_reclaim(struct rcu_head *rp)
1811{
1812 struct ip_sf_socklist *psf;
1813
1814 psf = container_of(rp, struct ip_sf_socklist, rcu);
1815 /* sk_omem_alloc should have been decreased by the caller*/
1816 kfree(psf);
1817}
1818
1810static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, 1819static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
1811 struct in_device *in_dev) 1820 struct in_device *in_dev)
1812{ 1821{
1822 struct ip_sf_socklist *psf = iml->sflist;
1813 int err; 1823 int err;
1814 1824
1815 if (iml->sflist == NULL) { 1825 if (psf == NULL) {
1816 /* any-source empty exclude case */ 1826 /* any-source empty exclude case */
1817 return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr, 1827 return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
1818 iml->sfmode, 0, NULL, 0); 1828 iml->sfmode, 0, NULL, 0);
1819 } 1829 }
1820 err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr, 1830 err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
1821 iml->sfmode, iml->sflist->sl_count, 1831 iml->sfmode, psf->sl_count, psf->sl_addr, 0);
1822 iml->sflist->sl_addr, 0); 1832 rcu_assign_pointer(iml->sflist, NULL);
1823 sock_kfree_s(sk, iml->sflist, IP_SFLSIZE(iml->sflist->sl_max)); 1833 /* decrease mem now to avoid the memleak warning */
1824 iml->sflist = NULL; 1834 atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
1835 call_rcu(&psf->rcu, ip_sf_socklist_reclaim);
1825 return err; 1836 return err;
1826} 1837}
1827 1838
1839
1840static void ip_mc_socklist_reclaim(struct rcu_head *rp)
1841{
1842 struct ip_mc_socklist *iml;
1843
1844 iml = container_of(rp, struct ip_mc_socklist, rcu);
1845 /* sk_omem_alloc should have been decreased by the caller*/
1846 kfree(iml);
1847}
1848
1849
1828/* 1850/*
1829 * Ask a socket to leave a group. 1851 * Ask a socket to leave a group.
1830 */ 1852 */
@@ -1854,12 +1876,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1854 1876
1855 (void) ip_mc_leave_src(sk, iml, in_dev); 1877 (void) ip_mc_leave_src(sk, iml, in_dev);
1856 1878
1857 *imlp = iml->next; 1879 rcu_assign_pointer(*imlp, iml->next);
1858 1880
1859 if (in_dev) 1881 if (in_dev)
1860 ip_mc_dec_group(in_dev, group); 1882 ip_mc_dec_group(in_dev, group);
1861 rtnl_unlock(); 1883 rtnl_unlock();
1862 sock_kfree_s(sk, iml, sizeof(*iml)); 1884 /* decrease mem now to avoid the memleak warning */
1885 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
1886 call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
1863 return 0; 1887 return 0;
1864 } 1888 }
1865 if (!in_dev) 1889 if (!in_dev)
@@ -1974,9 +1998,12 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
1974 if (psl) { 1998 if (psl) {
1975 for (i=0; i<psl->sl_count; i++) 1999 for (i=0; i<psl->sl_count; i++)
1976 newpsl->sl_addr[i] = psl->sl_addr[i]; 2000 newpsl->sl_addr[i] = psl->sl_addr[i];
1977 sock_kfree_s(sk, psl, IP_SFLSIZE(psl->sl_max)); 2001 /* decrease mem now to avoid the memleak warning */
2002 atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
2003 call_rcu(&psl->rcu, ip_sf_socklist_reclaim);
1978 } 2004 }
1979 pmc->sflist = psl = newpsl; 2005 rcu_assign_pointer(pmc->sflist, newpsl);
2006 psl = newpsl;
1980 } 2007 }
1981 rv = 1; /* > 0 for insert logic below if sl_count is 0 */ 2008 rv = 1; /* > 0 for insert logic below if sl_count is 0 */
1982 for (i=0; i<psl->sl_count; i++) { 2009 for (i=0; i<psl->sl_count; i++) {
@@ -2072,11 +2099,13 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
2072 if (psl) { 2099 if (psl) {
2073 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, 2100 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
2074 psl->sl_count, psl->sl_addr, 0); 2101 psl->sl_count, psl->sl_addr, 0);
2075 sock_kfree_s(sk, psl, IP_SFLSIZE(psl->sl_max)); 2102 /* decrease mem now to avoid the memleak warning */
2103 atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
2104 call_rcu(&psl->rcu, ip_sf_socklist_reclaim);
2076 } else 2105 } else
2077 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, 2106 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
2078 0, NULL, 0); 2107 0, NULL, 0);
2079 pmc->sflist = newpsl; 2108 rcu_assign_pointer(pmc->sflist, newpsl);
2080 pmc->sfmode = msf->imsf_fmode; 2109 pmc->sfmode = msf->imsf_fmode;
2081 err = 0; 2110 err = 0;
2082done: 2111done:
@@ -2209,30 +2238,40 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
2209 struct ip_mc_socklist *pmc; 2238 struct ip_mc_socklist *pmc;
2210 struct ip_sf_socklist *psl; 2239 struct ip_sf_socklist *psl;
2211 int i; 2240 int i;
2241 int ret;
2212 2242
2243 ret = 1;
2213 if (!ipv4_is_multicast(loc_addr)) 2244 if (!ipv4_is_multicast(loc_addr))
2214 return 1; 2245 goto out;
2215 2246
2216 for (pmc=inet->mc_list; pmc; pmc=pmc->next) { 2247 rcu_read_lock();
2248 for (pmc=rcu_dereference(inet->mc_list); pmc; pmc=rcu_dereference(pmc->next)) {
2217 if (pmc->multi.imr_multiaddr.s_addr == loc_addr && 2249 if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
2218 pmc->multi.imr_ifindex == dif) 2250 pmc->multi.imr_ifindex == dif)
2219 break; 2251 break;
2220 } 2252 }
2253 ret = inet->mc_all;
2221 if (!pmc) 2254 if (!pmc)
2222 return inet->mc_all; 2255 goto unlock;
2223 psl = pmc->sflist; 2256 psl = pmc->sflist;
2257 ret = (pmc->sfmode == MCAST_EXCLUDE);
2224 if (!psl) 2258 if (!psl)
2225 return pmc->sfmode == MCAST_EXCLUDE; 2259 goto unlock;
2226 2260
2227 for (i=0; i<psl->sl_count; i++) { 2261 for (i=0; i<psl->sl_count; i++) {
2228 if (psl->sl_addr[i] == rmt_addr) 2262 if (psl->sl_addr[i] == rmt_addr)
2229 break; 2263 break;
2230 } 2264 }
2265 ret = 0;
2231 if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count) 2266 if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
2232 return 0; 2267 goto unlock;
2233 if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count) 2268 if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
2234 return 0; 2269 goto unlock;
2235 return 1; 2270 ret = 1;
2271unlock:
2272 rcu_read_unlock();
2273out:
2274 return ret;
2236} 2275}
2237 2276
2238/* 2277/*
@@ -2251,7 +2290,7 @@ void ip_mc_drop_socket(struct sock *sk)
2251 rtnl_lock(); 2290 rtnl_lock();
2252 while ((iml = inet->mc_list) != NULL) { 2291 while ((iml = inet->mc_list) != NULL) {
2253 struct in_device *in_dev; 2292 struct in_device *in_dev;
2254 inet->mc_list = iml->next; 2293 rcu_assign_pointer(inet->mc_list, iml->next);
2255 2294
2256 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); 2295 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
2257 (void) ip_mc_leave_src(sk, iml, in_dev); 2296 (void) ip_mc_leave_src(sk, iml, in_dev);
@@ -2259,7 +2298,9 @@ void ip_mc_drop_socket(struct sock *sk)
2259 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); 2298 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
2260 in_dev_put(in_dev); 2299 in_dev_put(in_dev);
2261 } 2300 }
2262 sock_kfree_s(sk, iml, sizeof(*iml)); 2301 /* decrease mem now to avoid the memleak warning */
2302 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
2303 call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
2263 } 2304 }
2264 rtnl_unlock(); 2305 rtnl_unlock();
2265} 2306}
@@ -2603,7 +2644,7 @@ static const struct file_operations igmp_mcf_seq_fops = {
2603 .release = seq_release_net, 2644 .release = seq_release_net,
2604}; 2645};
2605 2646
2606static int igmp_net_init(struct net *net) 2647static int __net_init igmp_net_init(struct net *net)
2607{ 2648{
2608 struct proc_dir_entry *pde; 2649 struct proc_dir_entry *pde;
2609 2650
@@ -2621,7 +2662,7 @@ out_igmp:
2621 return -ENOMEM; 2662 return -ENOMEM;
2622} 2663}
2623 2664
2624static void igmp_net_exit(struct net *net) 2665static void __net_exit igmp_net_exit(struct net *net)
2625{ 2666{
2626 proc_net_remove(net, "mcfilter"); 2667 proc_net_remove(net, "mcfilter");
2627 proc_net_remove(net, "igmp"); 2668 proc_net_remove(net, "igmp");
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index ee16475f8fc3..8da6429269dd 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -529,6 +529,8 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
529 syn_ack_recalc(req, thresh, max_retries, 529 syn_ack_recalc(req, thresh, max_retries,
530 queue->rskq_defer_accept, 530 queue->rskq_defer_accept,
531 &expire, &resend); 531 &expire, &resend);
532 if (req->rsk_ops->syn_ack_timeout)
533 req->rsk_ops->syn_ack_timeout(parent, req);
532 if (!expire && 534 if (!expire &&
533 (!resend || 535 (!resend ||
534 !req->rsk_ops->rtx_syn_ack(parent, req, NULL) || 536 !req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index bdb78dd180ce..1aaa8110d84b 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -368,7 +368,7 @@ static int inet_diag_bc_run(const void *bc, int len,
368 yes = entry->sport >= op[1].no; 368 yes = entry->sport >= op[1].no;
369 break; 369 break;
370 case INET_DIAG_BC_S_LE: 370 case INET_DIAG_BC_S_LE:
371 yes = entry->dport <= op[1].no; 371 yes = entry->sport <= op[1].no;
372 break; 372 break;
373 case INET_DIAG_BC_D_GE: 373 case INET_DIAG_BC_D_GE:
374 yes = entry->dport >= op[1].no; 374 yes = entry->dport >= op[1].no;
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 86964b353c31..b59430bc041c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -32,6 +32,8 @@
32#include <linux/netdevice.h> 32#include <linux/netdevice.h>
33#include <linux/jhash.h> 33#include <linux/jhash.h>
34#include <linux/random.h> 34#include <linux/random.h>
35#include <net/route.h>
36#include <net/dst.h>
35#include <net/sock.h> 37#include <net/sock.h>
36#include <net/ip.h> 38#include <net/ip.h>
37#include <net/icmp.h> 39#include <net/icmp.h>
@@ -205,11 +207,34 @@ static void ip_expire(unsigned long arg)
205 if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) { 207 if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
206 struct sk_buff *head = qp->q.fragments; 208 struct sk_buff *head = qp->q.fragments;
207 209
208 /* Send an ICMP "Fragment Reassembly Timeout" message. */
209 rcu_read_lock(); 210 rcu_read_lock();
210 head->dev = dev_get_by_index_rcu(net, qp->iif); 211 head->dev = dev_get_by_index_rcu(net, qp->iif);
211 if (head->dev) 212 if (!head->dev)
212 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); 213 goto out_rcu_unlock;
214
215 /*
216 * Only search router table for the head fragment,
217 * when defraging timeout at PRE_ROUTING HOOK.
218 */
219 if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) {
220 const struct iphdr *iph = ip_hdr(head);
221 int err = ip_route_input(head, iph->daddr, iph->saddr,
222 iph->tos, head->dev);
223 if (unlikely(err))
224 goto out_rcu_unlock;
225
226 /*
227 * Only an end host needs to send an ICMP
228 * "Fragment Reassembly Timeout" message, per RFC792.
229 */
230 if (skb_rtable(head)->rt_type != RTN_LOCAL)
231 goto out_rcu_unlock;
232
233 }
234
235 /* Send an ICMP "Fragment Reassembly Timeout" message. */
236 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
237out_rcu_unlock:
213 rcu_read_unlock(); 238 rcu_read_unlock();
214 } 239 }
215out: 240out:
@@ -646,7 +671,7 @@ static struct ctl_table ip4_frags_ctl_table[] = {
646 { } 671 { }
647}; 672};
648 673
649static int ip4_frags_ns_ctl_register(struct net *net) 674static int __net_init ip4_frags_ns_ctl_register(struct net *net)
650{ 675{
651 struct ctl_table *table; 676 struct ctl_table *table;
652 struct ctl_table_header *hdr; 677 struct ctl_table_header *hdr;
@@ -676,7 +701,7 @@ err_alloc:
676 return -ENOMEM; 701 return -ENOMEM;
677} 702}
678 703
679static void ip4_frags_ns_ctl_unregister(struct net *net) 704static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
680{ 705{
681 struct ctl_table *table; 706 struct ctl_table *table;
682 707
@@ -704,7 +729,7 @@ static inline void ip4_frags_ctl_register(void)
704} 729}
705#endif 730#endif
706 731
707static int ipv4_frags_init_net(struct net *net) 732static int __net_init ipv4_frags_init_net(struct net *net)
708{ 733{
709 /* 734 /*
710 * Fragment cache limits. We will commit 256K at one time. Should we 735 * Fragment cache limits. We will commit 256K at one time. Should we
@@ -726,7 +751,7 @@ static int ipv4_frags_init_net(struct net *net)
726 return ip4_frags_ns_ctl_register(net); 751 return ip4_frags_ns_ctl_register(net);
727} 752}
728 753
729static void ipv4_frags_exit_net(struct net *net) 754static void __net_exit ipv4_frags_exit_net(struct net *net)
730{ 755{
731 ip4_frags_ns_ctl_unregister(net); 756 ip4_frags_ns_ctl_unregister(net);
732 inet_frags_exit_net(&net->ipv4.frags, &ip4_frags); 757 inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index f36ce156cac6..f78402d097b3 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -793,7 +793,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
793 } 793 }
794 794
795 if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) { 795 if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
796 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); 796 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
797 ip_rt_put(rt); 797 ip_rt_put(rt);
798 goto tx_error; 798 goto tx_error;
799 } 799 }
@@ -810,11 +810,13 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
810 tunnel->err_count = 0; 810 tunnel->err_count = 0;
811 } 811 }
812 812
813 max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen; 813 max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->u.dst.header_len;
814 814
815 if (skb_headroom(skb) < max_headroom || skb_shared(skb)|| 815 if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
816 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { 816 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
817 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); 817 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
818 if (max_headroom > dev->needed_headroom)
819 dev->needed_headroom = max_headroom;
818 if (!new_skb) { 820 if (!new_skb) {
819 ip_rt_put(rt); 821 ip_rt_put(rt);
820 txq->tx_dropped++; 822 txq->tx_dropped++;
@@ -1144,12 +1146,9 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
1144 1146
1145 if (saddr) 1147 if (saddr)
1146 memcpy(&iph->saddr, saddr, 4); 1148 memcpy(&iph->saddr, saddr, 4);
1147 1149 if (daddr)
1148 if (daddr) {
1149 memcpy(&iph->daddr, daddr, 4); 1150 memcpy(&iph->daddr, daddr, 4);
1150 return t->hlen; 1151 if (iph->daddr)
1151 }
1152 if (iph->daddr && !ipv4_is_multicast(iph->daddr))
1153 return t->hlen; 1152 return t->hlen;
1154 1153
1155 return -t->hlen; 1154 return -t->hlen;
@@ -1307,7 +1306,7 @@ static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
1307 } 1306 }
1308} 1307}
1309 1308
1310static int ipgre_init_net(struct net *net) 1309static int __net_init ipgre_init_net(struct net *net)
1311{ 1310{
1312 struct ipgre_net *ign = net_generic(net, ipgre_net_id); 1311 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1313 int err; 1312 int err;
@@ -1334,7 +1333,7 @@ err_alloc_dev:
1334 return err; 1333 return err;
1335} 1334}
1336 1335
1337static void ipgre_exit_net(struct net *net) 1336static void __net_exit ipgre_exit_net(struct net *net)
1338{ 1337{
1339 struct ipgre_net *ign; 1338 struct ipgre_net *ign;
1340 LIST_HEAD(list); 1339 LIST_HEAD(list);
@@ -1665,14 +1664,15 @@ static int __init ipgre_init(void)
1665 1664
1666 printk(KERN_INFO "GRE over IPv4 tunneling driver\n"); 1665 printk(KERN_INFO "GRE over IPv4 tunneling driver\n");
1667 1666
1668 if (inet_add_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) {
1669 printk(KERN_INFO "ipgre init: can't add protocol\n");
1670 return -EAGAIN;
1671 }
1672
1673 err = register_pernet_device(&ipgre_net_ops); 1667 err = register_pernet_device(&ipgre_net_ops);
1674 if (err < 0) 1668 if (err < 0)
1675 goto gen_device_failed; 1669 return err;
1670
1671 err = inet_add_protocol(&ipgre_protocol, IPPROTO_GRE);
1672 if (err < 0) {
1673 printk(KERN_INFO "ipgre init: can't add protocol\n");
1674 goto add_proto_failed;
1675 }
1676 1676
1677 err = rtnl_link_register(&ipgre_link_ops); 1677 err = rtnl_link_register(&ipgre_link_ops);
1678 if (err < 0) 1678 if (err < 0)
@@ -1688,9 +1688,9 @@ out:
1688tap_ops_failed: 1688tap_ops_failed:
1689 rtnl_link_unregister(&ipgre_link_ops); 1689 rtnl_link_unregister(&ipgre_link_ops);
1690rtnl_link_failed: 1690rtnl_link_failed:
1691 unregister_pernet_device(&ipgre_net_ops);
1692gen_device_failed:
1693 inet_del_protocol(&ipgre_protocol, IPPROTO_GRE); 1691 inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
1692add_proto_failed:
1693 unregister_pernet_device(&ipgre_net_ops);
1694 goto out; 1694 goto out;
1695} 1695}
1696 1696
@@ -1698,9 +1698,9 @@ static void __exit ipgre_fini(void)
1698{ 1698{
1699 rtnl_link_unregister(&ipgre_tap_ops); 1699 rtnl_link_unregister(&ipgre_tap_ops);
1700 rtnl_link_unregister(&ipgre_link_ops); 1700 rtnl_link_unregister(&ipgre_link_ops);
1701 unregister_pernet_device(&ipgre_net_ops);
1702 if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) 1701 if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
1703 printk(KERN_INFO "ipgre close: can't remove protocol\n"); 1702 printk(KERN_INFO "ipgre close: can't remove protocol\n");
1703 unregister_pernet_device(&ipgre_net_ops);
1704} 1704}
1705 1705
1706module_init(ipgre_init); 1706module_init(ipgre_init);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index e34013a78ef4..3451799e3dbf 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -254,7 +254,7 @@ int ip_mc_output(struct sk_buff *skb)
254 */ 254 */
255 255
256 if (rt->rt_flags&RTCF_MULTICAST) { 256 if (rt->rt_flags&RTCF_MULTICAST) {
257 if ((!sk || inet_sk(sk)->mc_loop) 257 if (sk_mc_loop(sk)
258#ifdef CONFIG_IP_MROUTE 258#ifdef CONFIG_IP_MROUTE
259 /* Small optimization: do not loopback not local frames, 259 /* Small optimization: do not loopback not local frames,
260 which returned after forwarding; they will be dropped 260 which returned after forwarding; they will be dropped
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index cafad9baff03..644dc43a55de 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -451,7 +451,8 @@ static int do_ip_setsockopt(struct sock *sk, int level,
451 (1<<IP_TTL) | (1<<IP_HDRINCL) | 451 (1<<IP_TTL) | (1<<IP_HDRINCL) |
452 (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) | 452 (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
453 (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | 453 (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
454 (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT))) || 454 (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
455 (1<<IP_MINTTL))) ||
455 optname == IP_MULTICAST_TTL || 456 optname == IP_MULTICAST_TTL ||
456 optname == IP_MULTICAST_ALL || 457 optname == IP_MULTICAST_ALL ||
457 optname == IP_MULTICAST_LOOP || 458 optname == IP_MULTICAST_LOOP ||
@@ -936,6 +937,14 @@ mc_msf_out:
936 inet->transparent = !!val; 937 inet->transparent = !!val;
937 break; 938 break;
938 939
940 case IP_MINTTL:
941 if (optlen < 1)
942 goto e_inval;
943 if (val < 0 || val > 255)
944 goto e_inval;
945 inet->min_ttl = val;
946 break;
947
939 default: 948 default:
940 err = -ENOPROTOOPT; 949 err = -ENOPROTOOPT;
941 break; 950 break;
@@ -1198,6 +1207,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1198 case IP_TRANSPARENT: 1207 case IP_TRANSPARENT:
1199 val = inet->transparent; 1208 val = inet->transparent;
1200 break; 1209 break;
1210 case IP_MINTTL:
1211 val = inet->min_ttl;
1212 break;
1201 default: 1213 default:
1202 release_sock(sk); 1214 release_sock(sk);
1203 return -ENOPROTOOPT; 1215 return -ENOPROTOOPT;
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 38fbf04150ae..629067571f02 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -25,6 +25,7 @@
25 25
26static void ipcomp4_err(struct sk_buff *skb, u32 info) 26static void ipcomp4_err(struct sk_buff *skb, u32 info)
27{ 27{
28 struct net *net = dev_net(skb->dev);
28 __be32 spi; 29 __be32 spi;
29 struct iphdr *iph = (struct iphdr *)skb->data; 30 struct iphdr *iph = (struct iphdr *)skb->data;
30 struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2)); 31 struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
@@ -35,7 +36,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
35 return; 36 return;
36 37
37 spi = htonl(ntohs(ipch->cpi)); 38 spi = htonl(ntohs(ipch->cpi));
38 x = xfrm_state_lookup(&init_net, (xfrm_address_t *)&iph->daddr, 39 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr,
39 spi, IPPROTO_COMP, AF_INET); 40 spi, IPPROTO_COMP, AF_INET);
40 if (!x) 41 if (!x)
41 return; 42 return;
@@ -47,9 +48,10 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
47/* We always hold one tunnel user reference to indicate a tunnel */ 48/* We always hold one tunnel user reference to indicate a tunnel */
48static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x) 49static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
49{ 50{
51 struct net *net = xs_net(x);
50 struct xfrm_state *t; 52 struct xfrm_state *t;
51 53
52 t = xfrm_state_alloc(&init_net); 54 t = xfrm_state_alloc(net);
53 if (t == NULL) 55 if (t == NULL)
54 goto out; 56 goto out;
55 57
@@ -61,6 +63,7 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
61 t->props.mode = x->props.mode; 63 t->props.mode = x->props.mode;
62 t->props.saddr.a4 = x->props.saddr.a4; 64 t->props.saddr.a4 = x->props.saddr.a4;
63 t->props.flags = x->props.flags; 65 t->props.flags = x->props.flags;
66 memcpy(&t->mark, &x->mark, sizeof(t->mark));
64 67
65 if (xfrm_init_state(t)) 68 if (xfrm_init_state(t))
66 goto error; 69 goto error;
@@ -82,10 +85,12 @@ error:
82 */ 85 */
83static int ipcomp_tunnel_attach(struct xfrm_state *x) 86static int ipcomp_tunnel_attach(struct xfrm_state *x)
84{ 87{
88 struct net *net = xs_net(x);
85 int err = 0; 89 int err = 0;
86 struct xfrm_state *t; 90 struct xfrm_state *t;
91 u32 mark = x->mark.v & x->mark.m;
87 92
88 t = xfrm_state_lookup(&init_net, (xfrm_address_t *)&x->id.daddr.a4, 93 t = xfrm_state_lookup(net, mark, (xfrm_address_t *)&x->id.daddr.a4,
89 x->props.saddr.a4, IPPROTO_IPIP, AF_INET); 94 x->props.saddr.a4, IPPROTO_IPIP, AF_INET);
90 if (!t) { 95 if (!t) {
91 t = ipcomp_tunnel_create(x); 96 t = ipcomp_tunnel_create(x);
@@ -124,16 +129,12 @@ static int ipcomp4_init_state(struct xfrm_state *x)
124 if (x->props.mode == XFRM_MODE_TUNNEL) { 129 if (x->props.mode == XFRM_MODE_TUNNEL) {
125 err = ipcomp_tunnel_attach(x); 130 err = ipcomp_tunnel_attach(x);
126 if (err) 131 if (err)
127 goto error_tunnel; 132 goto out;
128 } 133 }
129 134
130 err = 0; 135 err = 0;
131out: 136out:
132 return err; 137 return err;
133
134error_tunnel:
135 ipcomp_destroy(x);
136 goto out;
137} 138}
138 139
139static const struct xfrm_type ipcomp_type = { 140static const struct xfrm_type ipcomp_type = {
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 4e08b7f2331c..678909281648 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -187,6 +187,16 @@ struct ic_device {
187static struct ic_device *ic_first_dev __initdata = NULL;/* List of open device */ 187static struct ic_device *ic_first_dev __initdata = NULL;/* List of open device */
188static struct net_device *ic_dev __initdata = NULL; /* Selected device */ 188static struct net_device *ic_dev __initdata = NULL; /* Selected device */
189 189
190static bool __init ic_device_match(struct net_device *dev)
191{
192 if (user_dev_name[0] ? !strcmp(dev->name, user_dev_name) :
193 (!(dev->flags & IFF_LOOPBACK) &&
194 (dev->flags & (IFF_POINTOPOINT|IFF_BROADCAST)) &&
195 strncmp(dev->name, "dummy", 5)))
196 return true;
197 return false;
198}
199
190static int __init ic_open_devs(void) 200static int __init ic_open_devs(void)
191{ 201{
192 struct ic_device *d, **last; 202 struct ic_device *d, **last;
@@ -207,10 +217,7 @@ static int __init ic_open_devs(void)
207 for_each_netdev(&init_net, dev) { 217 for_each_netdev(&init_net, dev) {
208 if (dev->flags & IFF_LOOPBACK) 218 if (dev->flags & IFF_LOOPBACK)
209 continue; 219 continue;
210 if (user_dev_name[0] ? !strcmp(dev->name, user_dev_name) : 220 if (ic_device_match(dev)) {
211 (!(dev->flags & IFF_LOOPBACK) &&
212 (dev->flags & (IFF_POINTOPOINT|IFF_BROADCAST)) &&
213 strncmp(dev->name, "dummy", 5))) {
214 int able = 0; 221 int able = 0;
215 if (dev->mtu >= 364) 222 if (dev->mtu >= 364)
216 able |= IC_BOOTP; 223 able |= IC_BOOTP;
@@ -228,7 +235,7 @@ static int __init ic_open_devs(void)
228 } 235 }
229 if (!(d = kmalloc(sizeof(struct ic_device), GFP_KERNEL))) { 236 if (!(d = kmalloc(sizeof(struct ic_device), GFP_KERNEL))) {
230 rtnl_unlock(); 237 rtnl_unlock();
231 return -1; 238 return -ENOMEM;
232 } 239 }
233 d->dev = dev; 240 d->dev = dev;
234 *last = d; 241 *last = d;
@@ -253,7 +260,7 @@ static int __init ic_open_devs(void)
253 printk(KERN_ERR "IP-Config: Device `%s' not found.\n", user_dev_name); 260 printk(KERN_ERR "IP-Config: Device `%s' not found.\n", user_dev_name);
254 else 261 else
255 printk(KERN_ERR "IP-Config: No network devices available.\n"); 262 printk(KERN_ERR "IP-Config: No network devices available.\n");
256 return -1; 263 return -ENODEV;
257 } 264 }
258 return 0; 265 return 0;
259} 266}
@@ -1303,6 +1310,32 @@ __be32 __init root_nfs_parse_addr(char *name)
1303 return addr; 1310 return addr;
1304} 1311}
1305 1312
1313#define DEVICE_WAIT_MAX 12 /* 12 seconds */
1314
1315static int __init wait_for_devices(void)
1316{
1317 int i;
1318
1319 msleep(CONF_PRE_OPEN);
1320 for (i = 0; i < DEVICE_WAIT_MAX; i++) {
1321 struct net_device *dev;
1322 int found = 0;
1323
1324 rtnl_lock();
1325 for_each_netdev(&init_net, dev) {
1326 if (ic_device_match(dev)) {
1327 found = 1;
1328 break;
1329 }
1330 }
1331 rtnl_unlock();
1332 if (found)
1333 return 0;
1334 ssleep(1);
1335 }
1336 return -ENODEV;
1337}
1338
1306/* 1339/*
1307 * IP Autoconfig dispatcher. 1340 * IP Autoconfig dispatcher.
1308 */ 1341 */
@@ -1313,6 +1346,7 @@ static int __init ip_auto_config(void)
1313#ifdef IPCONFIG_DYNAMIC 1346#ifdef IPCONFIG_DYNAMIC
1314 int retries = CONF_OPEN_RETRIES; 1347 int retries = CONF_OPEN_RETRIES;
1315#endif 1348#endif
1349 int err;
1316 1350
1317#ifdef CONFIG_PROC_FS 1351#ifdef CONFIG_PROC_FS
1318 proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops); 1352 proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops);
@@ -1325,12 +1359,15 @@ static int __init ip_auto_config(void)
1325#ifdef IPCONFIG_DYNAMIC 1359#ifdef IPCONFIG_DYNAMIC
1326 try_try_again: 1360 try_try_again:
1327#endif 1361#endif
1328 /* Give hardware a chance to settle */ 1362 /* Wait for devices to appear */
1329 msleep(CONF_PRE_OPEN); 1363 err = wait_for_devices();
1364 if (err)
1365 return err;
1330 1366
1331 /* Setup all network devices */ 1367 /* Setup all network devices */
1332 if (ic_open_devs() < 0) 1368 err = ic_open_devs();
1333 return -1; 1369 if (err)
1370 return err;
1334 1371
1335 /* Give drivers a chance to settle */ 1372 /* Give drivers a chance to settle */
1336 ssleep(CONF_POST_OPEN); 1373 ssleep(CONF_POST_OPEN);
@@ -1446,7 +1483,7 @@ late_initcall(ip_auto_config);
1446 1483
1447/* 1484/*
1448 * Decode any IP configuration options in the "ip=" or "nfsaddrs=" kernel 1485 * Decode any IP configuration options in the "ip=" or "nfsaddrs=" kernel
1449 * command line parameter. See Documentation/filesystems/nfsroot.txt. 1486 * command line parameter. See Documentation/filesystems/nfs/nfsroot.txt.
1450 */ 1487 */
1451static int __init ic_proto_name(char *name) 1488static int __init ic_proto_name(char *name)
1452{ 1489{
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index eda04fed3379..2f302d3ac9a3 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -130,7 +130,6 @@ struct ipip_net {
130 struct net_device *fb_tunnel_dev; 130 struct net_device *fb_tunnel_dev;
131}; 131};
132 132
133static void ipip_fb_tunnel_init(struct net_device *dev);
134static void ipip_tunnel_init(struct net_device *dev); 133static void ipip_tunnel_init(struct net_device *dev);
135static void ipip_tunnel_setup(struct net_device *dev); 134static void ipip_tunnel_setup(struct net_device *dev);
136 135
@@ -730,7 +729,7 @@ static void ipip_tunnel_init(struct net_device *dev)
730 ipip_tunnel_bind_dev(dev); 729 ipip_tunnel_bind_dev(dev);
731} 730}
732 731
733static void ipip_fb_tunnel_init(struct net_device *dev) 732static void __net_init ipip_fb_tunnel_init(struct net_device *dev)
734{ 733{
735 struct ip_tunnel *tunnel = netdev_priv(dev); 734 struct ip_tunnel *tunnel = netdev_priv(dev);
736 struct iphdr *iph = &tunnel->parms.iph; 735 struct iphdr *iph = &tunnel->parms.iph;
@@ -773,7 +772,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
773 } 772 }
774} 773}
775 774
776static int ipip_init_net(struct net *net) 775static int __net_init ipip_init_net(struct net *net)
777{ 776{
778 struct ipip_net *ipn = net_generic(net, ipip_net_id); 777 struct ipip_net *ipn = net_generic(net, ipip_net_id);
779 int err; 778 int err;
@@ -806,7 +805,7 @@ err_alloc_dev:
806 return err; 805 return err;
807} 806}
808 807
809static void ipip_exit_net(struct net *net) 808static void __net_exit ipip_exit_net(struct net *net)
810{ 809{
811 struct ipip_net *ipn = net_generic(net, ipip_net_id); 810 struct ipip_net *ipn = net_generic(net, ipip_net_id);
812 LIST_HEAD(list); 811 LIST_HEAD(list);
@@ -831,15 +830,14 @@ static int __init ipip_init(void)
831 830
832 printk(banner); 831 printk(banner);
833 832
834 if (xfrm4_tunnel_register(&ipip_handler, AF_INET)) { 833 err = register_pernet_device(&ipip_net_ops);
834 if (err < 0)
835 return err;
836 err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
837 if (err < 0) {
838 unregister_pernet_device(&ipip_net_ops);
835 printk(KERN_INFO "ipip init: can't register tunnel\n"); 839 printk(KERN_INFO "ipip init: can't register tunnel\n");
836 return -EAGAIN;
837 } 840 }
838
839 err = register_pernet_device(&ipip_net_ops);
840 if (err)
841 xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
842
843 return err; 841 return err;
844} 842}
845 843
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 54596f73eff5..d0a6092a67be 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -802,6 +802,9 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
802 int line; 802 int line;
803 struct mfc_cache *uc, *c, **cp; 803 struct mfc_cache *uc, *c, **cp;
804 804
805 if (mfc->mfcc_parent >= MAXVIFS)
806 return -ENFILE;
807
805 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); 808 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
806 809
807 for (cp = &net->ipv4.mfc_cache_array[line]; 810 for (cp = &net->ipv4.mfc_cache_array[line];
@@ -1163,9 +1166,6 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v
1163 int ct; 1166 int ct;
1164 LIST_HEAD(list); 1167 LIST_HEAD(list);
1165 1168
1166 if (!net_eq(dev_net(dev), net))
1167 return NOTIFY_DONE;
1168
1169 if (event != NETDEV_UNREGISTER) 1169 if (event != NETDEV_UNREGISTER)
1170 return NOTIFY_DONE; 1170 return NOTIFY_DONE;
1171 v = &net->ipv4.vif_table[0]; 1171 v = &net->ipv4.vif_table[0];
@@ -1616,17 +1616,20 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
1616 int ct; 1616 int ct;
1617 struct rtnexthop *nhp; 1617 struct rtnexthop *nhp;
1618 struct net *net = mfc_net(c); 1618 struct net *net = mfc_net(c);
1619 struct net_device *dev = net->ipv4.vif_table[c->mfc_parent].dev;
1620 u8 *b = skb_tail_pointer(skb); 1619 u8 *b = skb_tail_pointer(skb);
1621 struct rtattr *mp_head; 1620 struct rtattr *mp_head;
1622 1621
1623 if (dev) 1622 /* If cache is unresolved, don't try to parse IIF and OIF */
1624 RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex); 1623 if (c->mfc_parent > MAXVIFS)
1624 return -ENOENT;
1625
1626 if (VIF_EXISTS(net, c->mfc_parent))
1627 RTA_PUT(skb, RTA_IIF, 4, &net->ipv4.vif_table[c->mfc_parent].dev->ifindex);
1625 1628
1626 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); 1629 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
1627 1630
1628 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { 1631 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
1629 if (c->mfc_un.res.ttls[ct] < 255) { 1632 if (VIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) {
1630 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) 1633 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
1631 goto rtattr_failure; 1634 goto rtattr_failure;
1632 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); 1635 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 06632762ba5f..f07d77f65751 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -27,6 +27,7 @@
27 27
28#include <linux/netfilter/x_tables.h> 28#include <linux/netfilter/x_tables.h>
29#include <linux/netfilter_arp/arp_tables.h> 29#include <linux/netfilter_arp/arp_tables.h>
30#include "../../netfilter/xt_repldata.h"
30 31
31MODULE_LICENSE("GPL"); 32MODULE_LICENSE("GPL");
32MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); 33MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
@@ -58,6 +59,12 @@ do { \
58#define ARP_NF_ASSERT(x) 59#define ARP_NF_ASSERT(x)
59#endif 60#endif
60 61
62void *arpt_alloc_initial_table(const struct xt_table *info)
63{
64 return xt_alloc_initial_table(arpt, ARPT);
65}
66EXPORT_SYMBOL_GPL(arpt_alloc_initial_table);
67
61static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap, 68static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
62 const char *hdr_addr, int len) 69 const char *hdr_addr, int len)
63{ 70{
@@ -226,7 +233,14 @@ arpt_error(struct sk_buff *skb, const struct xt_target_param *par)
226 return NF_DROP; 233 return NF_DROP;
227} 234}
228 235
229static inline struct arpt_entry *get_entry(void *base, unsigned int offset) 236static inline const struct arpt_entry_target *
237arpt_get_target_c(const struct arpt_entry *e)
238{
239 return arpt_get_target((struct arpt_entry *)e);
240}
241
242static inline struct arpt_entry *
243get_entry(const void *base, unsigned int offset)
230{ 244{
231 return (struct arpt_entry *)(base + offset); 245 return (struct arpt_entry *)(base + offset);
232} 246}
@@ -273,7 +287,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
273 287
274 arp = arp_hdr(skb); 288 arp = arp_hdr(skb);
275 do { 289 do {
276 struct arpt_entry_target *t; 290 const struct arpt_entry_target *t;
277 int hdr_len; 291 int hdr_len;
278 292
279 if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) { 293 if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
@@ -285,7 +299,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
285 (2 * skb->dev->addr_len); 299 (2 * skb->dev->addr_len);
286 ADD_COUNTER(e->counters, hdr_len, 1); 300 ADD_COUNTER(e->counters, hdr_len, 1);
287 301
288 t = arpt_get_target(e); 302 t = arpt_get_target_c(e);
289 303
290 /* Standard target? */ 304 /* Standard target? */
291 if (!t->u.kernel.target->target) { 305 if (!t->u.kernel.target->target) {
@@ -351,7 +365,7 @@ static inline bool unconditional(const struct arpt_arp *arp)
351/* Figures out from what hook each rule can be called: returns 0 if 365/* Figures out from what hook each rule can be called: returns 0 if
352 * there are loops. Puts hook bitmask in comefrom. 366 * there are loops. Puts hook bitmask in comefrom.
353 */ 367 */
354static int mark_source_chains(struct xt_table_info *newinfo, 368static int mark_source_chains(const struct xt_table_info *newinfo,
355 unsigned int valid_hooks, void *entry0) 369 unsigned int valid_hooks, void *entry0)
356{ 370{
357 unsigned int hook; 371 unsigned int hook;
@@ -372,7 +386,7 @@ static int mark_source_chains(struct xt_table_info *newinfo,
372 386
373 for (;;) { 387 for (;;) {
374 const struct arpt_standard_target *t 388 const struct arpt_standard_target *t
375 = (void *)arpt_get_target(e); 389 = (void *)arpt_get_target_c(e);
376 int visited = e->comefrom & (1 << hook); 390 int visited = e->comefrom & (1 << hook);
377 391
378 if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) { 392 if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) {
@@ -456,7 +470,7 @@ static int mark_source_chains(struct xt_table_info *newinfo,
456 return 1; 470 return 1;
457} 471}
458 472
459static inline int check_entry(struct arpt_entry *e, const char *name) 473static inline int check_entry(const struct arpt_entry *e, const char *name)
460{ 474{
461 const struct arpt_entry_target *t; 475 const struct arpt_entry_target *t;
462 476
@@ -468,7 +482,7 @@ static inline int check_entry(struct arpt_entry *e, const char *name)
468 if (e->target_offset + sizeof(struct arpt_entry_target) > e->next_offset) 482 if (e->target_offset + sizeof(struct arpt_entry_target) > e->next_offset)
469 return -EINVAL; 483 return -EINVAL;
470 484
471 t = arpt_get_target(e); 485 t = arpt_get_target_c(e);
472 if (e->target_offset + t->u.target_size > e->next_offset) 486 if (e->target_offset + t->u.target_size > e->next_offset)
473 return -EINVAL; 487 return -EINVAL;
474 488
@@ -498,8 +512,7 @@ static inline int check_target(struct arpt_entry *e, const char *name)
498} 512}
499 513
500static inline int 514static inline int
501find_check_entry(struct arpt_entry *e, const char *name, unsigned int size, 515find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
502 unsigned int *i)
503{ 516{
504 struct arpt_entry_target *t; 517 struct arpt_entry_target *t;
505 struct xt_target *target; 518 struct xt_target *target;
@@ -524,8 +537,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
524 ret = check_target(e, name); 537 ret = check_target(e, name);
525 if (ret) 538 if (ret)
526 goto err; 539 goto err;
527
528 (*i)++;
529 return 0; 540 return 0;
530err: 541err:
531 module_put(t->u.kernel.target->me); 542 module_put(t->u.kernel.target->me);
@@ -533,14 +544,14 @@ out:
533 return ret; 544 return ret;
534} 545}
535 546
536static bool check_underflow(struct arpt_entry *e) 547static bool check_underflow(const struct arpt_entry *e)
537{ 548{
538 const struct arpt_entry_target *t; 549 const struct arpt_entry_target *t;
539 unsigned int verdict; 550 unsigned int verdict;
540 551
541 if (!unconditional(&e->arp)) 552 if (!unconditional(&e->arp))
542 return false; 553 return false;
543 t = arpt_get_target(e); 554 t = arpt_get_target_c(e);
544 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) 555 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
545 return false; 556 return false;
546 verdict = ((struct arpt_standard_target *)t)->verdict; 557 verdict = ((struct arpt_standard_target *)t)->verdict;
@@ -550,12 +561,11 @@ static bool check_underflow(struct arpt_entry *e)
550 561
551static inline int check_entry_size_and_hooks(struct arpt_entry *e, 562static inline int check_entry_size_and_hooks(struct arpt_entry *e,
552 struct xt_table_info *newinfo, 563 struct xt_table_info *newinfo,
553 unsigned char *base, 564 const unsigned char *base,
554 unsigned char *limit, 565 const unsigned char *limit,
555 const unsigned int *hook_entries, 566 const unsigned int *hook_entries,
556 const unsigned int *underflows, 567 const unsigned int *underflows,
557 unsigned int valid_hooks, 568 unsigned int valid_hooks)
558 unsigned int *i)
559{ 569{
560 unsigned int h; 570 unsigned int h;
561 571
@@ -592,19 +602,14 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
592 /* Clear counters and comefrom */ 602 /* Clear counters and comefrom */
593 e->counters = ((struct xt_counters) { 0, 0 }); 603 e->counters = ((struct xt_counters) { 0, 0 });
594 e->comefrom = 0; 604 e->comefrom = 0;
595
596 (*i)++;
597 return 0; 605 return 0;
598} 606}
599 607
600static inline int cleanup_entry(struct arpt_entry *e, unsigned int *i) 608static inline void cleanup_entry(struct arpt_entry *e)
601{ 609{
602 struct xt_tgdtor_param par; 610 struct xt_tgdtor_param par;
603 struct arpt_entry_target *t; 611 struct arpt_entry_target *t;
604 612
605 if (i && (*i)-- == 0)
606 return 1;
607
608 t = arpt_get_target(e); 613 t = arpt_get_target(e);
609 par.target = t->u.kernel.target; 614 par.target = t->u.kernel.target;
610 par.targinfo = t->data; 615 par.targinfo = t->data;
@@ -612,26 +617,20 @@ static inline int cleanup_entry(struct arpt_entry *e, unsigned int *i)
612 if (par.target->destroy != NULL) 617 if (par.target->destroy != NULL)
613 par.target->destroy(&par); 618 par.target->destroy(&par);
614 module_put(par.target->me); 619 module_put(par.target->me);
615 return 0;
616} 620}
617 621
618/* Checks and translates the user-supplied table segment (held in 622/* Checks and translates the user-supplied table segment (held in
619 * newinfo). 623 * newinfo).
620 */ 624 */
621static int translate_table(const char *name, 625static int translate_table(struct xt_table_info *newinfo, void *entry0,
622 unsigned int valid_hooks, 626 const struct arpt_replace *repl)
623 struct xt_table_info *newinfo,
624 void *entry0,
625 unsigned int size,
626 unsigned int number,
627 const unsigned int *hook_entries,
628 const unsigned int *underflows)
629{ 627{
628 struct arpt_entry *iter;
630 unsigned int i; 629 unsigned int i;
631 int ret; 630 int ret = 0;
632 631
633 newinfo->size = size; 632 newinfo->size = repl->size;
634 newinfo->number = number; 633 newinfo->number = repl->num_entries;
635 634
636 /* Init all hooks to impossible value. */ 635 /* Init all hooks to impossible value. */
637 for (i = 0; i < NF_ARP_NUMHOOKS; i++) { 636 for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
@@ -643,52 +642,63 @@ static int translate_table(const char *name,
643 i = 0; 642 i = 0;
644 643
645 /* Walk through entries, checking offsets. */ 644 /* Walk through entries, checking offsets. */
646 ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size, 645 xt_entry_foreach(iter, entry0, newinfo->size) {
647 check_entry_size_and_hooks, 646 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
648 newinfo, 647 entry0 + repl->size,
649 entry0, 648 repl->hook_entry,
650 entry0 + size, 649 repl->underflow,
651 hook_entries, underflows, valid_hooks, &i); 650 repl->valid_hooks);
651 if (ret != 0)
652 break;
653 ++i;
654 }
652 duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret); 655 duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
653 if (ret != 0) 656 if (ret != 0)
654 return ret; 657 return ret;
655 658
656 if (i != number) { 659 if (i != repl->num_entries) {
657 duprintf("translate_table: %u not %u entries\n", 660 duprintf("translate_table: %u not %u entries\n",
658 i, number); 661 i, repl->num_entries);
659 return -EINVAL; 662 return -EINVAL;
660 } 663 }
661 664
662 /* Check hooks all assigned */ 665 /* Check hooks all assigned */
663 for (i = 0; i < NF_ARP_NUMHOOKS; i++) { 666 for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
664 /* Only hooks which are valid */ 667 /* Only hooks which are valid */
665 if (!(valid_hooks & (1 << i))) 668 if (!(repl->valid_hooks & (1 << i)))
666 continue; 669 continue;
667 if (newinfo->hook_entry[i] == 0xFFFFFFFF) { 670 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
668 duprintf("Invalid hook entry %u %u\n", 671 duprintf("Invalid hook entry %u %u\n",
669 i, hook_entries[i]); 672 i, repl->hook_entry[i]);
670 return -EINVAL; 673 return -EINVAL;
671 } 674 }
672 if (newinfo->underflow[i] == 0xFFFFFFFF) { 675 if (newinfo->underflow[i] == 0xFFFFFFFF) {
673 duprintf("Invalid underflow %u %u\n", 676 duprintf("Invalid underflow %u %u\n",
674 i, underflows[i]); 677 i, repl->underflow[i]);
675 return -EINVAL; 678 return -EINVAL;
676 } 679 }
677 } 680 }
678 681
679 if (!mark_source_chains(newinfo, valid_hooks, entry0)) { 682 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
680 duprintf("Looping hook\n"); 683 duprintf("Looping hook\n");
681 return -ELOOP; 684 return -ELOOP;
682 } 685 }
683 686
684 /* Finally, each sanity check must pass */ 687 /* Finally, each sanity check must pass */
685 i = 0; 688 i = 0;
686 ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size, 689 xt_entry_foreach(iter, entry0, newinfo->size) {
687 find_check_entry, name, size, &i); 690 ret = find_check_entry(iter, repl->name, repl->size);
691 if (ret != 0)
692 break;
693 ++i;
694 }
688 695
689 if (ret != 0) { 696 if (ret != 0) {
690 ARPT_ENTRY_ITERATE(entry0, newinfo->size, 697 xt_entry_foreach(iter, entry0, newinfo->size) {
691 cleanup_entry, &i); 698 if (i-- == 0)
699 break;
700 cleanup_entry(iter);
701 }
692 return ret; 702 return ret;
693 } 703 }
694 704
@@ -701,30 +711,10 @@ static int translate_table(const char *name,
701 return ret; 711 return ret;
702} 712}
703 713
704/* Gets counters. */
705static inline int add_entry_to_counter(const struct arpt_entry *e,
706 struct xt_counters total[],
707 unsigned int *i)
708{
709 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
710
711 (*i)++;
712 return 0;
713}
714
715static inline int set_entry_to_counter(const struct arpt_entry *e,
716 struct xt_counters total[],
717 unsigned int *i)
718{
719 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
720
721 (*i)++;
722 return 0;
723}
724
725static void get_counters(const struct xt_table_info *t, 714static void get_counters(const struct xt_table_info *t,
726 struct xt_counters counters[]) 715 struct xt_counters counters[])
727{ 716{
717 struct arpt_entry *iter;
728 unsigned int cpu; 718 unsigned int cpu;
729 unsigned int i; 719 unsigned int i;
730 unsigned int curcpu; 720 unsigned int curcpu;
@@ -740,32 +730,32 @@ static void get_counters(const struct xt_table_info *t,
740 curcpu = smp_processor_id(); 730 curcpu = smp_processor_id();
741 731
742 i = 0; 732 i = 0;
743 ARPT_ENTRY_ITERATE(t->entries[curcpu], 733 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
744 t->size, 734 SET_COUNTER(counters[i], iter->counters.bcnt,
745 set_entry_to_counter, 735 iter->counters.pcnt);
746 counters, 736 ++i;
747 &i); 737 }
748 738
749 for_each_possible_cpu(cpu) { 739 for_each_possible_cpu(cpu) {
750 if (cpu == curcpu) 740 if (cpu == curcpu)
751 continue; 741 continue;
752 i = 0; 742 i = 0;
753 xt_info_wrlock(cpu); 743 xt_info_wrlock(cpu);
754 ARPT_ENTRY_ITERATE(t->entries[cpu], 744 xt_entry_foreach(iter, t->entries[cpu], t->size) {
755 t->size, 745 ADD_COUNTER(counters[i], iter->counters.bcnt,
756 add_entry_to_counter, 746 iter->counters.pcnt);
757 counters, 747 ++i;
758 &i); 748 }
759 xt_info_wrunlock(cpu); 749 xt_info_wrunlock(cpu);
760 } 750 }
761 local_bh_enable(); 751 local_bh_enable();
762} 752}
763 753
764static struct xt_counters *alloc_counters(struct xt_table *table) 754static struct xt_counters *alloc_counters(const struct xt_table *table)
765{ 755{
766 unsigned int countersize; 756 unsigned int countersize;
767 struct xt_counters *counters; 757 struct xt_counters *counters;
768 struct xt_table_info *private = table->private; 758 const struct xt_table_info *private = table->private;
769 759
770 /* We need atomic snapshot of counters: rest doesn't change 760 /* We need atomic snapshot of counters: rest doesn't change
771 * (other than comefrom, which userspace doesn't care 761 * (other than comefrom, which userspace doesn't care
@@ -783,11 +773,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
783} 773}
784 774
785static int copy_entries_to_user(unsigned int total_size, 775static int copy_entries_to_user(unsigned int total_size,
786 struct xt_table *table, 776 const struct xt_table *table,
787 void __user *userptr) 777 void __user *userptr)
788{ 778{
789 unsigned int off, num; 779 unsigned int off, num;
790 struct arpt_entry *e; 780 const struct arpt_entry *e;
791 struct xt_counters *counters; 781 struct xt_counters *counters;
792 struct xt_table_info *private = table->private; 782 struct xt_table_info *private = table->private;
793 int ret = 0; 783 int ret = 0;
@@ -807,7 +797,7 @@ static int copy_entries_to_user(unsigned int total_size,
807 /* FIXME: use iterator macros --RR */ 797 /* FIXME: use iterator macros --RR */
808 /* ... then go back and fix counters and names */ 798 /* ... then go back and fix counters and names */
809 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ 799 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
810 struct arpt_entry_target *t; 800 const struct arpt_entry_target *t;
811 801
812 e = (struct arpt_entry *)(loc_cpu_entry + off); 802 e = (struct arpt_entry *)(loc_cpu_entry + off);
813 if (copy_to_user(userptr + off 803 if (copy_to_user(userptr + off
@@ -818,7 +808,7 @@ static int copy_entries_to_user(unsigned int total_size,
818 goto free_counters; 808 goto free_counters;
819 } 809 }
820 810
821 t = arpt_get_target(e); 811 t = arpt_get_target_c(e);
822 if (copy_to_user(userptr + off + e->target_offset 812 if (copy_to_user(userptr + off + e->target_offset
823 + offsetof(struct arpt_entry_target, 813 + offsetof(struct arpt_entry_target,
824 u.user.name), 814 u.user.name),
@@ -835,7 +825,7 @@ static int copy_entries_to_user(unsigned int total_size,
835} 825}
836 826
837#ifdef CONFIG_COMPAT 827#ifdef CONFIG_COMPAT
838static void compat_standard_from_user(void *dst, void *src) 828static void compat_standard_from_user(void *dst, const void *src)
839{ 829{
840 int v = *(compat_int_t *)src; 830 int v = *(compat_int_t *)src;
841 831
@@ -844,7 +834,7 @@ static void compat_standard_from_user(void *dst, void *src)
844 memcpy(dst, &v, sizeof(v)); 834 memcpy(dst, &v, sizeof(v));
845} 835}
846 836
847static int compat_standard_to_user(void __user *dst, void *src) 837static int compat_standard_to_user(void __user *dst, const void *src)
848{ 838{
849 compat_int_t cv = *(int *)src; 839 compat_int_t cv = *(int *)src;
850 840
@@ -853,18 +843,18 @@ static int compat_standard_to_user(void __user *dst, void *src)
853 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; 843 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
854} 844}
855 845
856static int compat_calc_entry(struct arpt_entry *e, 846static int compat_calc_entry(const struct arpt_entry *e,
857 const struct xt_table_info *info, 847 const struct xt_table_info *info,
858 void *base, struct xt_table_info *newinfo) 848 const void *base, struct xt_table_info *newinfo)
859{ 849{
860 struct arpt_entry_target *t; 850 const struct arpt_entry_target *t;
861 unsigned int entry_offset; 851 unsigned int entry_offset;
862 int off, i, ret; 852 int off, i, ret;
863 853
864 off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); 854 off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
865 entry_offset = (void *)e - base; 855 entry_offset = (void *)e - base;
866 856
867 t = arpt_get_target(e); 857 t = arpt_get_target_c(e);
868 off += xt_compat_target_offset(t->u.kernel.target); 858 off += xt_compat_target_offset(t->u.kernel.target);
869 newinfo->size -= off; 859 newinfo->size -= off;
870 ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off); 860 ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
@@ -885,7 +875,9 @@ static int compat_calc_entry(struct arpt_entry *e,
885static int compat_table_info(const struct xt_table_info *info, 875static int compat_table_info(const struct xt_table_info *info,
886 struct xt_table_info *newinfo) 876 struct xt_table_info *newinfo)
887{ 877{
878 struct arpt_entry *iter;
888 void *loc_cpu_entry; 879 void *loc_cpu_entry;
880 int ret;
889 881
890 if (!newinfo || !info) 882 if (!newinfo || !info)
891 return -EINVAL; 883 return -EINVAL;
@@ -894,13 +886,17 @@ static int compat_table_info(const struct xt_table_info *info,
894 memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); 886 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
895 newinfo->initial_entries = 0; 887 newinfo->initial_entries = 0;
896 loc_cpu_entry = info->entries[raw_smp_processor_id()]; 888 loc_cpu_entry = info->entries[raw_smp_processor_id()];
897 return ARPT_ENTRY_ITERATE(loc_cpu_entry, info->size, 889 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
898 compat_calc_entry, info, loc_cpu_entry, 890 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
899 newinfo); 891 if (ret != 0)
892 return ret;
893 }
894 return 0;
900} 895}
901#endif 896#endif
902 897
903static int get_info(struct net *net, void __user *user, int *len, int compat) 898static int get_info(struct net *net, void __user *user,
899 const int *len, int compat)
904{ 900{
905 char name[ARPT_TABLE_MAXNAMELEN]; 901 char name[ARPT_TABLE_MAXNAMELEN];
906 struct xt_table *t; 902 struct xt_table *t;
@@ -925,10 +921,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
925 if (t && !IS_ERR(t)) { 921 if (t && !IS_ERR(t)) {
926 struct arpt_getinfo info; 922 struct arpt_getinfo info;
927 const struct xt_table_info *private = t->private; 923 const struct xt_table_info *private = t->private;
928
929#ifdef CONFIG_COMPAT 924#ifdef CONFIG_COMPAT
925 struct xt_table_info tmp;
926
930 if (compat) { 927 if (compat) {
931 struct xt_table_info tmp;
932 ret = compat_table_info(private, &tmp); 928 ret = compat_table_info(private, &tmp);
933 xt_compat_flush_offsets(NFPROTO_ARP); 929 xt_compat_flush_offsets(NFPROTO_ARP);
934 private = &tmp; 930 private = &tmp;
@@ -959,7 +955,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
959} 955}
960 956
961static int get_entries(struct net *net, struct arpt_get_entries __user *uptr, 957static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
962 int *len) 958 const int *len)
963{ 959{
964 int ret; 960 int ret;
965 struct arpt_get_entries get; 961 struct arpt_get_entries get;
@@ -1010,6 +1006,7 @@ static int __do_replace(struct net *net, const char *name,
1010 struct xt_table_info *oldinfo; 1006 struct xt_table_info *oldinfo;
1011 struct xt_counters *counters; 1007 struct xt_counters *counters;
1012 void *loc_cpu_old_entry; 1008 void *loc_cpu_old_entry;
1009 struct arpt_entry *iter;
1013 1010
1014 ret = 0; 1011 ret = 0;
1015 counters = vmalloc_node(num_counters * sizeof(struct xt_counters), 1012 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
@@ -1053,8 +1050,8 @@ static int __do_replace(struct net *net, const char *name,
1053 1050
1054 /* Decrease module usage counts and free resource */ 1051 /* Decrease module usage counts and free resource */
1055 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; 1052 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1056 ARPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, 1053 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1057 NULL); 1054 cleanup_entry(iter);
1058 1055
1059 xt_free_table_info(oldinfo); 1056 xt_free_table_info(oldinfo);
1060 if (copy_to_user(counters_ptr, counters, 1057 if (copy_to_user(counters_ptr, counters,
@@ -1073,12 +1070,14 @@ static int __do_replace(struct net *net, const char *name,
1073 return ret; 1070 return ret;
1074} 1071}
1075 1072
1076static int do_replace(struct net *net, void __user *user, unsigned int len) 1073static int do_replace(struct net *net, const void __user *user,
1074 unsigned int len)
1077{ 1075{
1078 int ret; 1076 int ret;
1079 struct arpt_replace tmp; 1077 struct arpt_replace tmp;
1080 struct xt_table_info *newinfo; 1078 struct xt_table_info *newinfo;
1081 void *loc_cpu_entry; 1079 void *loc_cpu_entry;
1080 struct arpt_entry *iter;
1082 1081
1083 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1082 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1084 return -EFAULT; 1083 return -EFAULT;
@@ -1099,9 +1098,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
1099 goto free_newinfo; 1098 goto free_newinfo;
1100 } 1099 }
1101 1100
1102 ret = translate_table(tmp.name, tmp.valid_hooks, 1101 ret = translate_table(newinfo, loc_cpu_entry, &tmp);
1103 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1104 tmp.hook_entry, tmp.underflow);
1105 if (ret != 0) 1102 if (ret != 0)
1106 goto free_newinfo; 1103 goto free_newinfo;
1107 1104
@@ -1114,27 +1111,15 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
1114 return 0; 1111 return 0;
1115 1112
1116 free_newinfo_untrans: 1113 free_newinfo_untrans:
1117 ARPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); 1114 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1115 cleanup_entry(iter);
1118 free_newinfo: 1116 free_newinfo:
1119 xt_free_table_info(newinfo); 1117 xt_free_table_info(newinfo);
1120 return ret; 1118 return ret;
1121} 1119}
1122 1120
1123/* We're lazy, and add to the first CPU; overflow works its fey magic 1121static int do_add_counters(struct net *net, const void __user *user,
1124 * and everything is OK. */ 1122 unsigned int len, int compat)
1125static int
1126add_counter_to_entry(struct arpt_entry *e,
1127 const struct xt_counters addme[],
1128 unsigned int *i)
1129{
1130 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1131
1132 (*i)++;
1133 return 0;
1134}
1135
1136static int do_add_counters(struct net *net, void __user *user, unsigned int len,
1137 int compat)
1138{ 1123{
1139 unsigned int i, curcpu; 1124 unsigned int i, curcpu;
1140 struct xt_counters_info tmp; 1125 struct xt_counters_info tmp;
@@ -1147,6 +1132,7 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
1147 const struct xt_table_info *private; 1132 const struct xt_table_info *private;
1148 int ret = 0; 1133 int ret = 0;
1149 void *loc_cpu_entry; 1134 void *loc_cpu_entry;
1135 struct arpt_entry *iter;
1150#ifdef CONFIG_COMPAT 1136#ifdef CONFIG_COMPAT
1151 struct compat_xt_counters_info compat_tmp; 1137 struct compat_xt_counters_info compat_tmp;
1152 1138
@@ -1204,11 +1190,10 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
1204 curcpu = smp_processor_id(); 1190 curcpu = smp_processor_id();
1205 loc_cpu_entry = private->entries[curcpu]; 1191 loc_cpu_entry = private->entries[curcpu];
1206 xt_info_wrlock(curcpu); 1192 xt_info_wrlock(curcpu);
1207 ARPT_ENTRY_ITERATE(loc_cpu_entry, 1193 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1208 private->size, 1194 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1209 add_counter_to_entry, 1195 ++i;
1210 paddc, 1196 }
1211 &i);
1212 xt_info_wrunlock(curcpu); 1197 xt_info_wrunlock(curcpu);
1213 unlock_up_free: 1198 unlock_up_free:
1214 local_bh_enable(); 1199 local_bh_enable();
@@ -1221,28 +1206,22 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
1221} 1206}
1222 1207
1223#ifdef CONFIG_COMPAT 1208#ifdef CONFIG_COMPAT
1224static inline int 1209static inline void compat_release_entry(struct compat_arpt_entry *e)
1225compat_release_entry(struct compat_arpt_entry *e, unsigned int *i)
1226{ 1210{
1227 struct arpt_entry_target *t; 1211 struct arpt_entry_target *t;
1228 1212
1229 if (i && (*i)-- == 0)
1230 return 1;
1231
1232 t = compat_arpt_get_target(e); 1213 t = compat_arpt_get_target(e);
1233 module_put(t->u.kernel.target->me); 1214 module_put(t->u.kernel.target->me);
1234 return 0;
1235} 1215}
1236 1216
1237static inline int 1217static inline int
1238check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, 1218check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
1239 struct xt_table_info *newinfo, 1219 struct xt_table_info *newinfo,
1240 unsigned int *size, 1220 unsigned int *size,
1241 unsigned char *base, 1221 const unsigned char *base,
1242 unsigned char *limit, 1222 const unsigned char *limit,
1243 unsigned int *hook_entries, 1223 const unsigned int *hook_entries,
1244 unsigned int *underflows, 1224 const unsigned int *underflows,
1245 unsigned int *i,
1246 const char *name) 1225 const char *name)
1247{ 1226{
1248 struct arpt_entry_target *t; 1227 struct arpt_entry_target *t;
@@ -1302,8 +1281,6 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
1302 /* Clear counters and comefrom */ 1281 /* Clear counters and comefrom */
1303 memset(&e->counters, 0, sizeof(e->counters)); 1282 memset(&e->counters, 0, sizeof(e->counters));
1304 e->comefrom = 0; 1283 e->comefrom = 0;
1305
1306 (*i)++;
1307 return 0; 1284 return 0;
1308 1285
1309release_target: 1286release_target:
@@ -1347,19 +1324,6 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
1347 return ret; 1324 return ret;
1348} 1325}
1349 1326
1350static inline int compat_check_entry(struct arpt_entry *e, const char *name,
1351 unsigned int *i)
1352{
1353 int ret;
1354
1355 ret = check_target(e, name);
1356 if (ret)
1357 return ret;
1358
1359 (*i)++;
1360 return 0;
1361}
1362
1363static int translate_compat_table(const char *name, 1327static int translate_compat_table(const char *name,
1364 unsigned int valid_hooks, 1328 unsigned int valid_hooks,
1365 struct xt_table_info **pinfo, 1329 struct xt_table_info **pinfo,
@@ -1372,8 +1336,10 @@ static int translate_compat_table(const char *name,
1372 unsigned int i, j; 1336 unsigned int i, j;
1373 struct xt_table_info *newinfo, *info; 1337 struct xt_table_info *newinfo, *info;
1374 void *pos, *entry0, *entry1; 1338 void *pos, *entry0, *entry1;
1339 struct compat_arpt_entry *iter0;
1340 struct arpt_entry *iter1;
1375 unsigned int size; 1341 unsigned int size;
1376 int ret; 1342 int ret = 0;
1377 1343
1378 info = *pinfo; 1344 info = *pinfo;
1379 entry0 = *pentry0; 1345 entry0 = *pentry0;
@@ -1390,13 +1356,17 @@ static int translate_compat_table(const char *name,
1390 j = 0; 1356 j = 0;
1391 xt_compat_lock(NFPROTO_ARP); 1357 xt_compat_lock(NFPROTO_ARP);
1392 /* Walk through entries, checking offsets. */ 1358 /* Walk through entries, checking offsets. */
1393 ret = COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size, 1359 xt_entry_foreach(iter0, entry0, total_size) {
1394 check_compat_entry_size_and_hooks, 1360 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1395 info, &size, entry0, 1361 entry0,
1396 entry0 + total_size, 1362 entry0 + total_size,
1397 hook_entries, underflows, &j, name); 1363 hook_entries,
1398 if (ret != 0) 1364 underflows,
1399 goto out_unlock; 1365 name);
1366 if (ret != 0)
1367 goto out_unlock;
1368 ++j;
1369 }
1400 1370
1401 ret = -EINVAL; 1371 ret = -EINVAL;
1402 if (j != number) { 1372 if (j != number) {
@@ -1435,9 +1405,12 @@ static int translate_compat_table(const char *name,
1435 entry1 = newinfo->entries[raw_smp_processor_id()]; 1405 entry1 = newinfo->entries[raw_smp_processor_id()];
1436 pos = entry1; 1406 pos = entry1;
1437 size = total_size; 1407 size = total_size;
1438 ret = COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size, 1408 xt_entry_foreach(iter0, entry0, total_size) {
1439 compat_copy_entry_from_user, 1409 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1440 &pos, &size, name, newinfo, entry1); 1410 name, newinfo, entry1);
1411 if (ret != 0)
1412 break;
1413 }
1441 xt_compat_flush_offsets(NFPROTO_ARP); 1414 xt_compat_flush_offsets(NFPROTO_ARP);
1442 xt_compat_unlock(NFPROTO_ARP); 1415 xt_compat_unlock(NFPROTO_ARP);
1443 if (ret) 1416 if (ret)
@@ -1448,13 +1421,32 @@ static int translate_compat_table(const char *name,
1448 goto free_newinfo; 1421 goto free_newinfo;
1449 1422
1450 i = 0; 1423 i = 0;
1451 ret = ARPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry, 1424 xt_entry_foreach(iter1, entry1, newinfo->size) {
1452 name, &i); 1425 ret = check_target(iter1, name);
1426 if (ret != 0)
1427 break;
1428 ++i;
1429 }
1453 if (ret) { 1430 if (ret) {
1431 /*
1432 * The first i matches need cleanup_entry (calls ->destroy)
1433 * because they had called ->check already. The other j-i
1434 * entries need only release.
1435 */
1436 int skip = i;
1454 j -= i; 1437 j -= i;
1455 COMPAT_ARPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i, 1438 xt_entry_foreach(iter0, entry0, newinfo->size) {
1456 compat_release_entry, &j); 1439 if (skip-- > 0)
1457 ARPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i); 1440 continue;
1441 if (j-- == 0)
1442 break;
1443 compat_release_entry(iter0);
1444 }
1445 xt_entry_foreach(iter1, entry1, newinfo->size) {
1446 if (i-- == 0)
1447 break;
1448 cleanup_entry(iter1);
1449 }
1458 xt_free_table_info(newinfo); 1450 xt_free_table_info(newinfo);
1459 return ret; 1451 return ret;
1460 } 1452 }
@@ -1472,7 +1464,11 @@ static int translate_compat_table(const char *name,
1472free_newinfo: 1464free_newinfo:
1473 xt_free_table_info(newinfo); 1465 xt_free_table_info(newinfo);
1474out: 1466out:
1475 COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j); 1467 xt_entry_foreach(iter0, entry0, total_size) {
1468 if (j-- == 0)
1469 break;
1470 compat_release_entry(iter0);
1471 }
1476 return ret; 1472 return ret;
1477out_unlock: 1473out_unlock:
1478 xt_compat_flush_offsets(NFPROTO_ARP); 1474 xt_compat_flush_offsets(NFPROTO_ARP);
@@ -1499,6 +1495,7 @@ static int compat_do_replace(struct net *net, void __user *user,
1499 struct compat_arpt_replace tmp; 1495 struct compat_arpt_replace tmp;
1500 struct xt_table_info *newinfo; 1496 struct xt_table_info *newinfo;
1501 void *loc_cpu_entry; 1497 void *loc_cpu_entry;
1498 struct arpt_entry *iter;
1502 1499
1503 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1500 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1504 return -EFAULT; 1501 return -EFAULT;
@@ -1536,7 +1533,8 @@ static int compat_do_replace(struct net *net, void __user *user,
1536 return 0; 1533 return 0;
1537 1534
1538 free_newinfo_untrans: 1535 free_newinfo_untrans:
1539 ARPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); 1536 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1537 cleanup_entry(iter);
1540 free_newinfo: 1538 free_newinfo:
1541 xt_free_table_info(newinfo); 1539 xt_free_table_info(newinfo);
1542 return ret; 1540 return ret;
@@ -1570,7 +1568,7 @@ static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user,
1570static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr, 1568static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
1571 compat_uint_t *size, 1569 compat_uint_t *size,
1572 struct xt_counters *counters, 1570 struct xt_counters *counters,
1573 unsigned int *i) 1571 unsigned int i)
1574{ 1572{
1575 struct arpt_entry_target *t; 1573 struct arpt_entry_target *t;
1576 struct compat_arpt_entry __user *ce; 1574 struct compat_arpt_entry __user *ce;
@@ -1578,14 +1576,12 @@ static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
1578 compat_uint_t origsize; 1576 compat_uint_t origsize;
1579 int ret; 1577 int ret;
1580 1578
1581 ret = -EFAULT;
1582 origsize = *size; 1579 origsize = *size;
1583 ce = (struct compat_arpt_entry __user *)*dstptr; 1580 ce = (struct compat_arpt_entry __user *)*dstptr;
1584 if (copy_to_user(ce, e, sizeof(struct arpt_entry))) 1581 if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 ||
1585 goto out; 1582 copy_to_user(&ce->counters, &counters[i],
1586 1583 sizeof(counters[i])) != 0)
1587 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i]))) 1584 return -EFAULT;
1588 goto out;
1589 1585
1590 *dstptr += sizeof(struct compat_arpt_entry); 1586 *dstptr += sizeof(struct compat_arpt_entry);
1591 *size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); 1587 *size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
@@ -1595,18 +1591,12 @@ static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
1595 t = arpt_get_target(e); 1591 t = arpt_get_target(e);
1596 ret = xt_compat_target_to_user(t, dstptr, size); 1592 ret = xt_compat_target_to_user(t, dstptr, size);
1597 if (ret) 1593 if (ret)
1598 goto out; 1594 return ret;
1599 ret = -EFAULT;
1600 next_offset = e->next_offset - (origsize - *size); 1595 next_offset = e->next_offset - (origsize - *size);
1601 if (put_user(target_offset, &ce->target_offset)) 1596 if (put_user(target_offset, &ce->target_offset) != 0 ||
1602 goto out; 1597 put_user(next_offset, &ce->next_offset) != 0)
1603 if (put_user(next_offset, &ce->next_offset)) 1598 return -EFAULT;
1604 goto out;
1605
1606 (*i)++;
1607 return 0; 1599 return 0;
1608out:
1609 return ret;
1610} 1600}
1611 1601
1612static int compat_copy_entries_to_user(unsigned int total_size, 1602static int compat_copy_entries_to_user(unsigned int total_size,
@@ -1620,6 +1610,7 @@ static int compat_copy_entries_to_user(unsigned int total_size,
1620 int ret = 0; 1610 int ret = 0;
1621 void *loc_cpu_entry; 1611 void *loc_cpu_entry;
1622 unsigned int i = 0; 1612 unsigned int i = 0;
1613 struct arpt_entry *iter;
1623 1614
1624 counters = alloc_counters(table); 1615 counters = alloc_counters(table);
1625 if (IS_ERR(counters)) 1616 if (IS_ERR(counters))
@@ -1629,9 +1620,12 @@ static int compat_copy_entries_to_user(unsigned int total_size,
1629 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1620 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1630 pos = userptr; 1621 pos = userptr;
1631 size = total_size; 1622 size = total_size;
1632 ret = ARPT_ENTRY_ITERATE(loc_cpu_entry, total_size, 1623 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1633 compat_copy_entry_to_user, 1624 ret = compat_copy_entry_to_user(iter, &pos,
1634 &pos, &size, counters, &i); 1625 &size, counters, i++);
1626 if (ret != 0)
1627 break;
1628 }
1635 vfree(counters); 1629 vfree(counters);
1636 return ret; 1630 return ret;
1637} 1631}
@@ -1799,12 +1793,7 @@ struct xt_table *arpt_register_table(struct net *net,
1799 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 1793 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1800 memcpy(loc_cpu_entry, repl->entries, repl->size); 1794 memcpy(loc_cpu_entry, repl->entries, repl->size);
1801 1795
1802 ret = translate_table(table->name, table->valid_hooks, 1796 ret = translate_table(newinfo, loc_cpu_entry, repl);
1803 newinfo, loc_cpu_entry, repl->size,
1804 repl->num_entries,
1805 repl->hook_entry,
1806 repl->underflow);
1807
1808 duprintf("arpt_register_table: translate table gives %d\n", ret); 1797 duprintf("arpt_register_table: translate table gives %d\n", ret);
1809 if (ret != 0) 1798 if (ret != 0)
1810 goto out_free; 1799 goto out_free;
@@ -1827,13 +1816,14 @@ void arpt_unregister_table(struct xt_table *table)
1827 struct xt_table_info *private; 1816 struct xt_table_info *private;
1828 void *loc_cpu_entry; 1817 void *loc_cpu_entry;
1829 struct module *table_owner = table->me; 1818 struct module *table_owner = table->me;
1819 struct arpt_entry *iter;
1830 1820
1831 private = xt_unregister_table(table); 1821 private = xt_unregister_table(table);
1832 1822
1833 /* Decrease module usage counts and free resources */ 1823 /* Decrease module usage counts and free resources */
1834 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1824 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1835 ARPT_ENTRY_ITERATE(loc_cpu_entry, private->size, 1825 xt_entry_foreach(iter, loc_cpu_entry, private->size)
1836 cleanup_entry, NULL); 1826 cleanup_entry(iter);
1837 if (private->number > private->initial_entries) 1827 if (private->number > private->initial_entries)
1838 module_put(table_owner); 1828 module_put(table_owner);
1839 xt_free_table_info(private); 1829 xt_free_table_info(private);
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index 97337601827a..bfe26f32b930 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -6,6 +6,7 @@
6 */ 6 */
7 7
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/netfilter/x_tables.h>
9#include <linux/netfilter_arp/arp_tables.h> 10#include <linux/netfilter_arp/arp_tables.h>
10 11
11MODULE_LICENSE("GPL"); 12MODULE_LICENSE("GPL");
@@ -15,93 +16,37 @@ MODULE_DESCRIPTION("arptables filter table");
15#define FILTER_VALID_HOOKS ((1 << NF_ARP_IN) | (1 << NF_ARP_OUT) | \ 16#define FILTER_VALID_HOOKS ((1 << NF_ARP_IN) | (1 << NF_ARP_OUT) | \
16 (1 << NF_ARP_FORWARD)) 17 (1 << NF_ARP_FORWARD))
17 18
18static const struct
19{
20 struct arpt_replace repl;
21 struct arpt_standard entries[3];
22 struct arpt_error term;
23} initial_table __net_initdata = {
24 .repl = {
25 .name = "filter",
26 .valid_hooks = FILTER_VALID_HOOKS,
27 .num_entries = 4,
28 .size = sizeof(struct arpt_standard) * 3 + sizeof(struct arpt_error),
29 .hook_entry = {
30 [NF_ARP_IN] = 0,
31 [NF_ARP_OUT] = sizeof(struct arpt_standard),
32 [NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard),
33 },
34 .underflow = {
35 [NF_ARP_IN] = 0,
36 [NF_ARP_OUT] = sizeof(struct arpt_standard),
37 [NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard),
38 },
39 },
40 .entries = {
41 ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_IN */
42 ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_OUT */
43 ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_FORWARD */
44 },
45 .term = ARPT_ERROR_INIT,
46};
47
48static const struct xt_table packet_filter = { 19static const struct xt_table packet_filter = {
49 .name = "filter", 20 .name = "filter",
50 .valid_hooks = FILTER_VALID_HOOKS, 21 .valid_hooks = FILTER_VALID_HOOKS,
51 .me = THIS_MODULE, 22 .me = THIS_MODULE,
52 .af = NFPROTO_ARP, 23 .af = NFPROTO_ARP,
24 .priority = NF_IP_PRI_FILTER,
53}; 25};
54 26
55/* The work comes in here from netfilter.c */ 27/* The work comes in here from netfilter.c */
56static unsigned int arpt_in_hook(unsigned int hook, 28static unsigned int
57 struct sk_buff *skb, 29arptable_filter_hook(unsigned int hook, struct sk_buff *skb,
58 const struct net_device *in, 30 const struct net_device *in, const struct net_device *out,
59 const struct net_device *out, 31 int (*okfn)(struct sk_buff *))
60 int (*okfn)(struct sk_buff *))
61{ 32{
62 return arpt_do_table(skb, hook, in, out, 33 const struct net *net = dev_net((in != NULL) ? in : out);
63 dev_net(in)->ipv4.arptable_filter);
64}
65 34
66static unsigned int arpt_out_hook(unsigned int hook, 35 return arpt_do_table(skb, hook, in, out, net->ipv4.arptable_filter);
67 struct sk_buff *skb,
68 const struct net_device *in,
69 const struct net_device *out,
70 int (*okfn)(struct sk_buff *))
71{
72 return arpt_do_table(skb, hook, in, out,
73 dev_net(out)->ipv4.arptable_filter);
74} 36}
75 37
76static struct nf_hook_ops arpt_ops[] __read_mostly = { 38static struct nf_hook_ops *arpfilter_ops __read_mostly;
77 {
78 .hook = arpt_in_hook,
79 .owner = THIS_MODULE,
80 .pf = NFPROTO_ARP,
81 .hooknum = NF_ARP_IN,
82 .priority = NF_IP_PRI_FILTER,
83 },
84 {
85 .hook = arpt_out_hook,
86 .owner = THIS_MODULE,
87 .pf = NFPROTO_ARP,
88 .hooknum = NF_ARP_OUT,
89 .priority = NF_IP_PRI_FILTER,
90 },
91 {
92 .hook = arpt_in_hook,
93 .owner = THIS_MODULE,
94 .pf = NFPROTO_ARP,
95 .hooknum = NF_ARP_FORWARD,
96 .priority = NF_IP_PRI_FILTER,
97 },
98};
99 39
100static int __net_init arptable_filter_net_init(struct net *net) 40static int __net_init arptable_filter_net_init(struct net *net)
101{ 41{
102 /* Register table */ 42 struct arpt_replace *repl;
43
44 repl = arpt_alloc_initial_table(&packet_filter);
45 if (repl == NULL)
46 return -ENOMEM;
103 net->ipv4.arptable_filter = 47 net->ipv4.arptable_filter =
104 arpt_register_table(net, &packet_filter, &initial_table.repl); 48 arpt_register_table(net, &packet_filter, repl);
49 kfree(repl);
105 if (IS_ERR(net->ipv4.arptable_filter)) 50 if (IS_ERR(net->ipv4.arptable_filter))
106 return PTR_ERR(net->ipv4.arptable_filter); 51 return PTR_ERR(net->ipv4.arptable_filter);
107 return 0; 52 return 0;
@@ -125,9 +70,11 @@ static int __init arptable_filter_init(void)
125 if (ret < 0) 70 if (ret < 0)
126 return ret; 71 return ret;
127 72
128 ret = nf_register_hooks(arpt_ops, ARRAY_SIZE(arpt_ops)); 73 arpfilter_ops = xt_hook_link(&packet_filter, arptable_filter_hook);
129 if (ret < 0) 74 if (IS_ERR(arpfilter_ops)) {
75 ret = PTR_ERR(arpfilter_ops);
130 goto cleanup_table; 76 goto cleanup_table;
77 }
131 return ret; 78 return ret;
132 79
133cleanup_table: 80cleanup_table:
@@ -137,7 +84,7 @@ cleanup_table:
137 84
138static void __exit arptable_filter_fini(void) 85static void __exit arptable_filter_fini(void)
139{ 86{
140 nf_unregister_hooks(arpt_ops, ARRAY_SIZE(arpt_ops)); 87 xt_hook_unlink(&packet_filter, arpfilter_ops);
141 unregister_pernet_subsys(&arptable_filter_net_ops); 88 unregister_pernet_subsys(&arptable_filter_net_ops);
142} 89}
143 90
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 572330a552ef..b29c66df8d1f 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -28,6 +28,7 @@
28#include <linux/netfilter/x_tables.h> 28#include <linux/netfilter/x_tables.h>
29#include <linux/netfilter_ipv4/ip_tables.h> 29#include <linux/netfilter_ipv4/ip_tables.h>
30#include <net/netfilter/nf_log.h> 30#include <net/netfilter/nf_log.h>
31#include "../../netfilter/xt_repldata.h"
31 32
32MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
33MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 34MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -66,6 +67,12 @@ do { \
66#define inline 67#define inline
67#endif 68#endif
68 69
70void *ipt_alloc_initial_table(const struct xt_table *info)
71{
72 return xt_alloc_initial_table(ipt, IPT);
73}
74EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
75
69/* 76/*
70 We keep a set of rules for each CPU, so we can avoid write-locking 77 We keep a set of rules for each CPU, so we can avoid write-locking
71 them in the softirq when updating the counters and therefore 78 them in the softirq when updating the counters and therefore
@@ -169,7 +176,7 @@ ipt_error(struct sk_buff *skb, const struct xt_target_param *par)
169 176
170/* Performance critical - called for every packet */ 177/* Performance critical - called for every packet */
171static inline bool 178static inline bool
172do_match(struct ipt_entry_match *m, const struct sk_buff *skb, 179do_match(const struct ipt_entry_match *m, const struct sk_buff *skb,
173 struct xt_match_param *par) 180 struct xt_match_param *par)
174{ 181{
175 par->match = m->u.kernel.match; 182 par->match = m->u.kernel.match;
@@ -184,7 +191,7 @@ do_match(struct ipt_entry_match *m, const struct sk_buff *skb,
184 191
185/* Performance critical */ 192/* Performance critical */
186static inline struct ipt_entry * 193static inline struct ipt_entry *
187get_entry(void *base, unsigned int offset) 194get_entry(const void *base, unsigned int offset)
188{ 195{
189 return (struct ipt_entry *)(base + offset); 196 return (struct ipt_entry *)(base + offset);
190} 197}
@@ -199,6 +206,13 @@ static inline bool unconditional(const struct ipt_ip *ip)
199#undef FWINV 206#undef FWINV
200} 207}
201 208
209/* for const-correctness */
210static inline const struct ipt_entry_target *
211ipt_get_target_c(const struct ipt_entry *e)
212{
213 return ipt_get_target((struct ipt_entry *)e);
214}
215
202#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 216#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
203 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) 217 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
204static const char *const hooknames[] = { 218static const char *const hooknames[] = {
@@ -233,11 +247,11 @@ static struct nf_loginfo trace_loginfo = {
233 247
234/* Mildly perf critical (only if packet tracing is on) */ 248/* Mildly perf critical (only if packet tracing is on) */
235static inline int 249static inline int
236get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e, 250get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
237 const char *hookname, const char **chainname, 251 const char *hookname, const char **chainname,
238 const char **comment, unsigned int *rulenum) 252 const char **comment, unsigned int *rulenum)
239{ 253{
240 struct ipt_standard_target *t = (void *)ipt_get_target(s); 254 const struct ipt_standard_target *t = (void *)ipt_get_target_c(s);
241 255
242 if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) { 256 if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
243 /* Head of user chain: ERROR target with chainname */ 257 /* Head of user chain: ERROR target with chainname */
@@ -263,17 +277,18 @@ get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
263 return 0; 277 return 0;
264} 278}
265 279
266static void trace_packet(struct sk_buff *skb, 280static void trace_packet(const struct sk_buff *skb,
267 unsigned int hook, 281 unsigned int hook,
268 const struct net_device *in, 282 const struct net_device *in,
269 const struct net_device *out, 283 const struct net_device *out,
270 const char *tablename, 284 const char *tablename,
271 struct xt_table_info *private, 285 const struct xt_table_info *private,
272 struct ipt_entry *e) 286 const struct ipt_entry *e)
273{ 287{
274 void *table_base; 288 const void *table_base;
275 const struct ipt_entry *root; 289 const struct ipt_entry *root;
276 const char *hookname, *chainname, *comment; 290 const char *hookname, *chainname, *comment;
291 const struct ipt_entry *iter;
277 unsigned int rulenum = 0; 292 unsigned int rulenum = 0;
278 293
279 table_base = private->entries[smp_processor_id()]; 294 table_base = private->entries[smp_processor_id()];
@@ -282,10 +297,10 @@ static void trace_packet(struct sk_buff *skb,
282 hookname = chainname = hooknames[hook]; 297 hookname = chainname = hooknames[hook];
283 comment = comments[NF_IP_TRACE_COMMENT_RULE]; 298 comment = comments[NF_IP_TRACE_COMMENT_RULE];
284 299
285 IPT_ENTRY_ITERATE(root, 300 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
286 private->size - private->hook_entry[hook], 301 if (get_chainname_rulenum(iter, e, hookname,
287 get_chainname_rulenum, 302 &chainname, &comment, &rulenum) != 0)
288 e, hookname, &chainname, &comment, &rulenum); 303 break;
289 304
290 nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo, 305 nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
291 "TRACE: %s:%s:%s:%u ", 306 "TRACE: %s:%s:%s:%u ",
@@ -315,9 +330,9 @@ ipt_do_table(struct sk_buff *skb,
315 /* Initializing verdict to NF_DROP keeps gcc happy. */ 330 /* Initializing verdict to NF_DROP keeps gcc happy. */
316 unsigned int verdict = NF_DROP; 331 unsigned int verdict = NF_DROP;
317 const char *indev, *outdev; 332 const char *indev, *outdev;
318 void *table_base; 333 const void *table_base;
319 struct ipt_entry *e, *back; 334 struct ipt_entry *e, *back;
320 struct xt_table_info *private; 335 const struct xt_table_info *private;
321 struct xt_match_param mtpar; 336 struct xt_match_param mtpar;
322 struct xt_target_param tgpar; 337 struct xt_target_param tgpar;
323 338
@@ -350,17 +365,22 @@ ipt_do_table(struct sk_buff *skb,
350 back = get_entry(table_base, private->underflow[hook]); 365 back = get_entry(table_base, private->underflow[hook]);
351 366
352 do { 367 do {
353 struct ipt_entry_target *t; 368 const struct ipt_entry_target *t;
369 const struct xt_entry_match *ematch;
354 370
355 IP_NF_ASSERT(e); 371 IP_NF_ASSERT(e);
356 IP_NF_ASSERT(back); 372 IP_NF_ASSERT(back);
357 if (!ip_packet_match(ip, indev, outdev, 373 if (!ip_packet_match(ip, indev, outdev,
358 &e->ip, mtpar.fragoff) || 374 &e->ip, mtpar.fragoff)) {
359 IPT_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) { 375 no_match:
360 e = ipt_next_entry(e); 376 e = ipt_next_entry(e);
361 continue; 377 continue;
362 } 378 }
363 379
380 xt_ematch_foreach(ematch, e)
381 if (do_match(ematch, skb, &mtpar) != 0)
382 goto no_match;
383
364 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1); 384 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
365 385
366 t = ipt_get_target(e); 386 t = ipt_get_target(e);
@@ -443,7 +463,7 @@ ipt_do_table(struct sk_buff *skb,
443/* Figures out from what hook each rule can be called: returns 0 if 463/* Figures out from what hook each rule can be called: returns 0 if
444 there are loops. Puts hook bitmask in comefrom. */ 464 there are loops. Puts hook bitmask in comefrom. */
445static int 465static int
446mark_source_chains(struct xt_table_info *newinfo, 466mark_source_chains(const struct xt_table_info *newinfo,
447 unsigned int valid_hooks, void *entry0) 467 unsigned int valid_hooks, void *entry0)
448{ 468{
449 unsigned int hook; 469 unsigned int hook;
@@ -461,8 +481,8 @@ mark_source_chains(struct xt_table_info *newinfo,
461 e->counters.pcnt = pos; 481 e->counters.pcnt = pos;
462 482
463 for (;;) { 483 for (;;) {
464 struct ipt_standard_target *t 484 const struct ipt_standard_target *t
465 = (void *)ipt_get_target(e); 485 = (void *)ipt_get_target_c(e);
466 int visited = e->comefrom & (1 << hook); 486 int visited = e->comefrom & (1 << hook);
467 487
468 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { 488 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
@@ -552,27 +572,23 @@ mark_source_chains(struct xt_table_info *newinfo,
552 return 1; 572 return 1;
553} 573}
554 574
555static int 575static void cleanup_match(struct ipt_entry_match *m, struct net *net)
556cleanup_match(struct ipt_entry_match *m, unsigned int *i)
557{ 576{
558 struct xt_mtdtor_param par; 577 struct xt_mtdtor_param par;
559 578
560 if (i && (*i)-- == 0) 579 par.net = net;
561 return 1;
562
563 par.match = m->u.kernel.match; 580 par.match = m->u.kernel.match;
564 par.matchinfo = m->data; 581 par.matchinfo = m->data;
565 par.family = NFPROTO_IPV4; 582 par.family = NFPROTO_IPV4;
566 if (par.match->destroy != NULL) 583 if (par.match->destroy != NULL)
567 par.match->destroy(&par); 584 par.match->destroy(&par);
568 module_put(par.match->me); 585 module_put(par.match->me);
569 return 0;
570} 586}
571 587
572static int 588static int
573check_entry(struct ipt_entry *e, const char *name) 589check_entry(const struct ipt_entry *e, const char *name)
574{ 590{
575 struct ipt_entry_target *t; 591 const struct ipt_entry_target *t;
576 592
577 if (!ip_checkentry(&e->ip)) { 593 if (!ip_checkentry(&e->ip)) {
578 duprintf("ip_tables: ip check failed %p %s.\n", e, name); 594 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
@@ -583,7 +599,7 @@ check_entry(struct ipt_entry *e, const char *name)
583 e->next_offset) 599 e->next_offset)
584 return -EINVAL; 600 return -EINVAL;
585 601
586 t = ipt_get_target(e); 602 t = ipt_get_target_c(e);
587 if (e->target_offset + t->u.target_size > e->next_offset) 603 if (e->target_offset + t->u.target_size > e->next_offset)
588 return -EINVAL; 604 return -EINVAL;
589 605
@@ -591,8 +607,7 @@ check_entry(struct ipt_entry *e, const char *name)
591} 607}
592 608
593static int 609static int
594check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par, 610check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par)
595 unsigned int *i)
596{ 611{
597 const struct ipt_ip *ip = par->entryinfo; 612 const struct ipt_ip *ip = par->entryinfo;
598 int ret; 613 int ret;
@@ -607,13 +622,11 @@ check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
607 par.match->name); 622 par.match->name);
608 return ret; 623 return ret;
609 } 624 }
610 ++*i;
611 return 0; 625 return 0;
612} 626}
613 627
614static int 628static int
615find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par, 629find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par)
616 unsigned int *i)
617{ 630{
618 struct xt_match *match; 631 struct xt_match *match;
619 int ret; 632 int ret;
@@ -627,7 +640,7 @@ find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
627 } 640 }
628 m->u.kernel.match = match; 641 m->u.kernel.match = match;
629 642
630 ret = check_match(m, par, i); 643 ret = check_match(m, par);
631 if (ret) 644 if (ret)
632 goto err; 645 goto err;
633 646
@@ -637,10 +650,11 @@ err:
637 return ret; 650 return ret;
638} 651}
639 652
640static int check_target(struct ipt_entry *e, const char *name) 653static int check_target(struct ipt_entry *e, struct net *net, const char *name)
641{ 654{
642 struct ipt_entry_target *t = ipt_get_target(e); 655 struct ipt_entry_target *t = ipt_get_target(e);
643 struct xt_tgchk_param par = { 656 struct xt_tgchk_param par = {
657 .net = net,
644 .table = name, 658 .table = name,
645 .entryinfo = e, 659 .entryinfo = e,
646 .target = t->u.kernel.target, 660 .target = t->u.kernel.target,
@@ -661,27 +675,32 @@ static int check_target(struct ipt_entry *e, const char *name)
661} 675}
662 676
663static int 677static int
664find_check_entry(struct ipt_entry *e, const char *name, unsigned int size, 678find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
665 unsigned int *i) 679 unsigned int size)
666{ 680{
667 struct ipt_entry_target *t; 681 struct ipt_entry_target *t;
668 struct xt_target *target; 682 struct xt_target *target;
669 int ret; 683 int ret;
670 unsigned int j; 684 unsigned int j;
671 struct xt_mtchk_param mtpar; 685 struct xt_mtchk_param mtpar;
686 struct xt_entry_match *ematch;
672 687
673 ret = check_entry(e, name); 688 ret = check_entry(e, name);
674 if (ret) 689 if (ret)
675 return ret; 690 return ret;
676 691
677 j = 0; 692 j = 0;
693 mtpar.net = net;
678 mtpar.table = name; 694 mtpar.table = name;
679 mtpar.entryinfo = &e->ip; 695 mtpar.entryinfo = &e->ip;
680 mtpar.hook_mask = e->comefrom; 696 mtpar.hook_mask = e->comefrom;
681 mtpar.family = NFPROTO_IPV4; 697 mtpar.family = NFPROTO_IPV4;
682 ret = IPT_MATCH_ITERATE(e, find_check_match, &mtpar, &j); 698 xt_ematch_foreach(ematch, e) {
683 if (ret != 0) 699 ret = find_check_match(ematch, &mtpar);
684 goto cleanup_matches; 700 if (ret != 0)
701 goto cleanup_matches;
702 ++j;
703 }
685 704
686 t = ipt_get_target(e); 705 t = ipt_get_target(e);
687 target = try_then_request_module(xt_find_target(AF_INET, 706 target = try_then_request_module(xt_find_target(AF_INET,
@@ -695,27 +714,29 @@ find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
695 } 714 }
696 t->u.kernel.target = target; 715 t->u.kernel.target = target;
697 716
698 ret = check_target(e, name); 717 ret = check_target(e, net, name);
699 if (ret) 718 if (ret)
700 goto err; 719 goto err;
701
702 (*i)++;
703 return 0; 720 return 0;
704 err: 721 err:
705 module_put(t->u.kernel.target->me); 722 module_put(t->u.kernel.target->me);
706 cleanup_matches: 723 cleanup_matches:
707 IPT_MATCH_ITERATE(e, cleanup_match, &j); 724 xt_ematch_foreach(ematch, e) {
725 if (j-- == 0)
726 break;
727 cleanup_match(ematch, net);
728 }
708 return ret; 729 return ret;
709} 730}
710 731
711static bool check_underflow(struct ipt_entry *e) 732static bool check_underflow(const struct ipt_entry *e)
712{ 733{
713 const struct ipt_entry_target *t; 734 const struct ipt_entry_target *t;
714 unsigned int verdict; 735 unsigned int verdict;
715 736
716 if (!unconditional(&e->ip)) 737 if (!unconditional(&e->ip))
717 return false; 738 return false;
718 t = ipt_get_target(e); 739 t = ipt_get_target_c(e);
719 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) 740 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
720 return false; 741 return false;
721 verdict = ((struct ipt_standard_target *)t)->verdict; 742 verdict = ((struct ipt_standard_target *)t)->verdict;
@@ -726,12 +747,11 @@ static bool check_underflow(struct ipt_entry *e)
726static int 747static int
727check_entry_size_and_hooks(struct ipt_entry *e, 748check_entry_size_and_hooks(struct ipt_entry *e,
728 struct xt_table_info *newinfo, 749 struct xt_table_info *newinfo,
729 unsigned char *base, 750 const unsigned char *base,
730 unsigned char *limit, 751 const unsigned char *limit,
731 const unsigned int *hook_entries, 752 const unsigned int *hook_entries,
732 const unsigned int *underflows, 753 const unsigned int *underflows,
733 unsigned int valid_hooks, 754 unsigned int valid_hooks)
734 unsigned int *i)
735{ 755{
736 unsigned int h; 756 unsigned int h;
737 757
@@ -768,50 +788,42 @@ check_entry_size_and_hooks(struct ipt_entry *e,
768 /* Clear counters and comefrom */ 788 /* Clear counters and comefrom */
769 e->counters = ((struct xt_counters) { 0, 0 }); 789 e->counters = ((struct xt_counters) { 0, 0 });
770 e->comefrom = 0; 790 e->comefrom = 0;
771
772 (*i)++;
773 return 0; 791 return 0;
774} 792}
775 793
776static int 794static void
777cleanup_entry(struct ipt_entry *e, unsigned int *i) 795cleanup_entry(struct ipt_entry *e, struct net *net)
778{ 796{
779 struct xt_tgdtor_param par; 797 struct xt_tgdtor_param par;
780 struct ipt_entry_target *t; 798 struct ipt_entry_target *t;
781 799 struct xt_entry_match *ematch;
782 if (i && (*i)-- == 0)
783 return 1;
784 800
785 /* Cleanup all matches */ 801 /* Cleanup all matches */
786 IPT_MATCH_ITERATE(e, cleanup_match, NULL); 802 xt_ematch_foreach(ematch, e)
803 cleanup_match(ematch, net);
787 t = ipt_get_target(e); 804 t = ipt_get_target(e);
788 805
806 par.net = net;
789 par.target = t->u.kernel.target; 807 par.target = t->u.kernel.target;
790 par.targinfo = t->data; 808 par.targinfo = t->data;
791 par.family = NFPROTO_IPV4; 809 par.family = NFPROTO_IPV4;
792 if (par.target->destroy != NULL) 810 if (par.target->destroy != NULL)
793 par.target->destroy(&par); 811 par.target->destroy(&par);
794 module_put(par.target->me); 812 module_put(par.target->me);
795 return 0;
796} 813}
797 814
798/* Checks and translates the user-supplied table segment (held in 815/* Checks and translates the user-supplied table segment (held in
799 newinfo) */ 816 newinfo) */
800static int 817static int
801translate_table(const char *name, 818translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
802 unsigned int valid_hooks, 819 const struct ipt_replace *repl)
803 struct xt_table_info *newinfo,
804 void *entry0,
805 unsigned int size,
806 unsigned int number,
807 const unsigned int *hook_entries,
808 const unsigned int *underflows)
809{ 820{
821 struct ipt_entry *iter;
810 unsigned int i; 822 unsigned int i;
811 int ret; 823 int ret = 0;
812 824
813 newinfo->size = size; 825 newinfo->size = repl->size;
814 newinfo->number = number; 826 newinfo->number = repl->num_entries;
815 827
816 /* Init all hooks to impossible value. */ 828 /* Init all hooks to impossible value. */
817 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 829 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
@@ -822,49 +834,58 @@ translate_table(const char *name,
822 duprintf("translate_table: size %u\n", newinfo->size); 834 duprintf("translate_table: size %u\n", newinfo->size);
823 i = 0; 835 i = 0;
824 /* Walk through entries, checking offsets. */ 836 /* Walk through entries, checking offsets. */
825 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size, 837 xt_entry_foreach(iter, entry0, newinfo->size) {
826 check_entry_size_and_hooks, 838 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
827 newinfo, 839 entry0 + repl->size,
828 entry0, 840 repl->hook_entry,
829 entry0 + size, 841 repl->underflow,
830 hook_entries, underflows, valid_hooks, &i); 842 repl->valid_hooks);
831 if (ret != 0) 843 if (ret != 0)
832 return ret; 844 return ret;
845 ++i;
846 }
833 847
834 if (i != number) { 848 if (i != repl->num_entries) {
835 duprintf("translate_table: %u not %u entries\n", 849 duprintf("translate_table: %u not %u entries\n",
836 i, number); 850 i, repl->num_entries);
837 return -EINVAL; 851 return -EINVAL;
838 } 852 }
839 853
840 /* Check hooks all assigned */ 854 /* Check hooks all assigned */
841 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 855 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
842 /* Only hooks which are valid */ 856 /* Only hooks which are valid */
843 if (!(valid_hooks & (1 << i))) 857 if (!(repl->valid_hooks & (1 << i)))
844 continue; 858 continue;
845 if (newinfo->hook_entry[i] == 0xFFFFFFFF) { 859 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
846 duprintf("Invalid hook entry %u %u\n", 860 duprintf("Invalid hook entry %u %u\n",
847 i, hook_entries[i]); 861 i, repl->hook_entry[i]);
848 return -EINVAL; 862 return -EINVAL;
849 } 863 }
850 if (newinfo->underflow[i] == 0xFFFFFFFF) { 864 if (newinfo->underflow[i] == 0xFFFFFFFF) {
851 duprintf("Invalid underflow %u %u\n", 865 duprintf("Invalid underflow %u %u\n",
852 i, underflows[i]); 866 i, repl->underflow[i]);
853 return -EINVAL; 867 return -EINVAL;
854 } 868 }
855 } 869 }
856 870
857 if (!mark_source_chains(newinfo, valid_hooks, entry0)) 871 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
858 return -ELOOP; 872 return -ELOOP;
859 873
860 /* Finally, each sanity check must pass */ 874 /* Finally, each sanity check must pass */
861 i = 0; 875 i = 0;
862 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size, 876 xt_entry_foreach(iter, entry0, newinfo->size) {
863 find_check_entry, name, size, &i); 877 ret = find_check_entry(iter, net, repl->name, repl->size);
878 if (ret != 0)
879 break;
880 ++i;
881 }
864 882
865 if (ret != 0) { 883 if (ret != 0) {
866 IPT_ENTRY_ITERATE(entry0, newinfo->size, 884 xt_entry_foreach(iter, entry0, newinfo->size) {
867 cleanup_entry, &i); 885 if (i-- == 0)
886 break;
887 cleanup_entry(iter, net);
888 }
868 return ret; 889 return ret;
869 } 890 }
870 891
@@ -877,33 +898,11 @@ translate_table(const char *name,
877 return ret; 898 return ret;
878} 899}
879 900
880/* Gets counters. */
881static inline int
882add_entry_to_counter(const struct ipt_entry *e,
883 struct xt_counters total[],
884 unsigned int *i)
885{
886 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
887
888 (*i)++;
889 return 0;
890}
891
892static inline int
893set_entry_to_counter(const struct ipt_entry *e,
894 struct ipt_counters total[],
895 unsigned int *i)
896{
897 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
898
899 (*i)++;
900 return 0;
901}
902
903static void 901static void
904get_counters(const struct xt_table_info *t, 902get_counters(const struct xt_table_info *t,
905 struct xt_counters counters[]) 903 struct xt_counters counters[])
906{ 904{
905 struct ipt_entry *iter;
907 unsigned int cpu; 906 unsigned int cpu;
908 unsigned int i; 907 unsigned int i;
909 unsigned int curcpu; 908 unsigned int curcpu;
@@ -919,32 +918,32 @@ get_counters(const struct xt_table_info *t,
919 curcpu = smp_processor_id(); 918 curcpu = smp_processor_id();
920 919
921 i = 0; 920 i = 0;
922 IPT_ENTRY_ITERATE(t->entries[curcpu], 921 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
923 t->size, 922 SET_COUNTER(counters[i], iter->counters.bcnt,
924 set_entry_to_counter, 923 iter->counters.pcnt);
925 counters, 924 ++i;
926 &i); 925 }
927 926
928 for_each_possible_cpu(cpu) { 927 for_each_possible_cpu(cpu) {
929 if (cpu == curcpu) 928 if (cpu == curcpu)
930 continue; 929 continue;
931 i = 0; 930 i = 0;
932 xt_info_wrlock(cpu); 931 xt_info_wrlock(cpu);
933 IPT_ENTRY_ITERATE(t->entries[cpu], 932 xt_entry_foreach(iter, t->entries[cpu], t->size) {
934 t->size, 933 ADD_COUNTER(counters[i], iter->counters.bcnt,
935 add_entry_to_counter, 934 iter->counters.pcnt);
936 counters, 935 ++i; /* macro does multi eval of i */
937 &i); 936 }
938 xt_info_wrunlock(cpu); 937 xt_info_wrunlock(cpu);
939 } 938 }
940 local_bh_enable(); 939 local_bh_enable();
941} 940}
942 941
943static struct xt_counters * alloc_counters(struct xt_table *table) 942static struct xt_counters *alloc_counters(const struct xt_table *table)
944{ 943{
945 unsigned int countersize; 944 unsigned int countersize;
946 struct xt_counters *counters; 945 struct xt_counters *counters;
947 struct xt_table_info *private = table->private; 946 const struct xt_table_info *private = table->private;
948 947
949 /* We need atomic snapshot of counters: rest doesn't change 948 /* We need atomic snapshot of counters: rest doesn't change
950 (other than comefrom, which userspace doesn't care 949 (other than comefrom, which userspace doesn't care
@@ -962,11 +961,11 @@ static struct xt_counters * alloc_counters(struct xt_table *table)
962 961
963static int 962static int
964copy_entries_to_user(unsigned int total_size, 963copy_entries_to_user(unsigned int total_size,
965 struct xt_table *table, 964 const struct xt_table *table,
966 void __user *userptr) 965 void __user *userptr)
967{ 966{
968 unsigned int off, num; 967 unsigned int off, num;
969 struct ipt_entry *e; 968 const struct ipt_entry *e;
970 struct xt_counters *counters; 969 struct xt_counters *counters;
971 const struct xt_table_info *private = table->private; 970 const struct xt_table_info *private = table->private;
972 int ret = 0; 971 int ret = 0;
@@ -1018,7 +1017,7 @@ copy_entries_to_user(unsigned int total_size,
1018 } 1017 }
1019 } 1018 }
1020 1019
1021 t = ipt_get_target(e); 1020 t = ipt_get_target_c(e);
1022 if (copy_to_user(userptr + off + e->target_offset 1021 if (copy_to_user(userptr + off + e->target_offset
1023 + offsetof(struct ipt_entry_target, 1022 + offsetof(struct ipt_entry_target,
1024 u.user.name), 1023 u.user.name),
@@ -1035,7 +1034,7 @@ copy_entries_to_user(unsigned int total_size,
1035} 1034}
1036 1035
1037#ifdef CONFIG_COMPAT 1036#ifdef CONFIG_COMPAT
1038static void compat_standard_from_user(void *dst, void *src) 1037static void compat_standard_from_user(void *dst, const void *src)
1039{ 1038{
1040 int v = *(compat_int_t *)src; 1039 int v = *(compat_int_t *)src;
1041 1040
@@ -1044,7 +1043,7 @@ static void compat_standard_from_user(void *dst, void *src)
1044 memcpy(dst, &v, sizeof(v)); 1043 memcpy(dst, &v, sizeof(v));
1045} 1044}
1046 1045
1047static int compat_standard_to_user(void __user *dst, void *src) 1046static int compat_standard_to_user(void __user *dst, const void *src)
1048{ 1047{
1049 compat_int_t cv = *(int *)src; 1048 compat_int_t cv = *(int *)src;
1050 1049
@@ -1053,25 +1052,20 @@ static int compat_standard_to_user(void __user *dst, void *src)
1053 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; 1052 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1054} 1053}
1055 1054
1056static inline int 1055static int compat_calc_entry(const struct ipt_entry *e,
1057compat_calc_match(struct ipt_entry_match *m, int *size)
1058{
1059 *size += xt_compat_match_offset(m->u.kernel.match);
1060 return 0;
1061}
1062
1063static int compat_calc_entry(struct ipt_entry *e,
1064 const struct xt_table_info *info, 1056 const struct xt_table_info *info,
1065 void *base, struct xt_table_info *newinfo) 1057 const void *base, struct xt_table_info *newinfo)
1066{ 1058{
1067 struct ipt_entry_target *t; 1059 const struct xt_entry_match *ematch;
1060 const struct ipt_entry_target *t;
1068 unsigned int entry_offset; 1061 unsigned int entry_offset;
1069 int off, i, ret; 1062 int off, i, ret;
1070 1063
1071 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); 1064 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1072 entry_offset = (void *)e - base; 1065 entry_offset = (void *)e - base;
1073 IPT_MATCH_ITERATE(e, compat_calc_match, &off); 1066 xt_ematch_foreach(ematch, e)
1074 t = ipt_get_target(e); 1067 off += xt_compat_match_offset(ematch->u.kernel.match);
1068 t = ipt_get_target_c(e);
1075 off += xt_compat_target_offset(t->u.kernel.target); 1069 off += xt_compat_target_offset(t->u.kernel.target);
1076 newinfo->size -= off; 1070 newinfo->size -= off;
1077 ret = xt_compat_add_offset(AF_INET, entry_offset, off); 1071 ret = xt_compat_add_offset(AF_INET, entry_offset, off);
@@ -1092,7 +1086,9 @@ static int compat_calc_entry(struct ipt_entry *e,
1092static int compat_table_info(const struct xt_table_info *info, 1086static int compat_table_info(const struct xt_table_info *info,
1093 struct xt_table_info *newinfo) 1087 struct xt_table_info *newinfo)
1094{ 1088{
1089 struct ipt_entry *iter;
1095 void *loc_cpu_entry; 1090 void *loc_cpu_entry;
1091 int ret;
1096 1092
1097 if (!newinfo || !info) 1093 if (!newinfo || !info)
1098 return -EINVAL; 1094 return -EINVAL;
@@ -1101,13 +1097,17 @@ static int compat_table_info(const struct xt_table_info *info,
1101 memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); 1097 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1102 newinfo->initial_entries = 0; 1098 newinfo->initial_entries = 0;
1103 loc_cpu_entry = info->entries[raw_smp_processor_id()]; 1099 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1104 return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size, 1100 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1105 compat_calc_entry, info, loc_cpu_entry, 1101 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1106 newinfo); 1102 if (ret != 0)
1103 return ret;
1104 }
1105 return 0;
1107} 1106}
1108#endif 1107#endif
1109 1108
1110static int get_info(struct net *net, void __user *user, int *len, int compat) 1109static int get_info(struct net *net, void __user *user,
1110 const int *len, int compat)
1111{ 1111{
1112 char name[IPT_TABLE_MAXNAMELEN]; 1112 char name[IPT_TABLE_MAXNAMELEN];
1113 struct xt_table *t; 1113 struct xt_table *t;
@@ -1132,10 +1132,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
1132 if (t && !IS_ERR(t)) { 1132 if (t && !IS_ERR(t)) {
1133 struct ipt_getinfo info; 1133 struct ipt_getinfo info;
1134 const struct xt_table_info *private = t->private; 1134 const struct xt_table_info *private = t->private;
1135
1136#ifdef CONFIG_COMPAT 1135#ifdef CONFIG_COMPAT
1136 struct xt_table_info tmp;
1137
1137 if (compat) { 1138 if (compat) {
1138 struct xt_table_info tmp;
1139 ret = compat_table_info(private, &tmp); 1139 ret = compat_table_info(private, &tmp);
1140 xt_compat_flush_offsets(AF_INET); 1140 xt_compat_flush_offsets(AF_INET);
1141 private = &tmp; 1141 private = &tmp;
@@ -1167,7 +1167,8 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
1167} 1167}
1168 1168
1169static int 1169static int
1170get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len) 1170get_entries(struct net *net, struct ipt_get_entries __user *uptr,
1171 const int *len)
1171{ 1172{
1172 int ret; 1173 int ret;
1173 struct ipt_get_entries get; 1174 struct ipt_get_entries get;
@@ -1215,6 +1216,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1215 struct xt_table_info *oldinfo; 1216 struct xt_table_info *oldinfo;
1216 struct xt_counters *counters; 1217 struct xt_counters *counters;
1217 void *loc_cpu_old_entry; 1218 void *loc_cpu_old_entry;
1219 struct ipt_entry *iter;
1218 1220
1219 ret = 0; 1221 ret = 0;
1220 counters = vmalloc(num_counters * sizeof(struct xt_counters)); 1222 counters = vmalloc(num_counters * sizeof(struct xt_counters));
@@ -1257,8 +1259,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1257 1259
1258 /* Decrease module usage counts and free resource */ 1260 /* Decrease module usage counts and free resource */
1259 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; 1261 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1260 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, 1262 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1261 NULL); 1263 cleanup_entry(iter, net);
1264
1262 xt_free_table_info(oldinfo); 1265 xt_free_table_info(oldinfo);
1263 if (copy_to_user(counters_ptr, counters, 1266 if (copy_to_user(counters_ptr, counters,
1264 sizeof(struct xt_counters) * num_counters) != 0) 1267 sizeof(struct xt_counters) * num_counters) != 0)
@@ -1277,12 +1280,13 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1277} 1280}
1278 1281
1279static int 1282static int
1280do_replace(struct net *net, void __user *user, unsigned int len) 1283do_replace(struct net *net, const void __user *user, unsigned int len)
1281{ 1284{
1282 int ret; 1285 int ret;
1283 struct ipt_replace tmp; 1286 struct ipt_replace tmp;
1284 struct xt_table_info *newinfo; 1287 struct xt_table_info *newinfo;
1285 void *loc_cpu_entry; 1288 void *loc_cpu_entry;
1289 struct ipt_entry *iter;
1286 1290
1287 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1291 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1288 return -EFAULT; 1292 return -EFAULT;
@@ -1303,9 +1307,7 @@ do_replace(struct net *net, void __user *user, unsigned int len)
1303 goto free_newinfo; 1307 goto free_newinfo;
1304 } 1308 }
1305 1309
1306 ret = translate_table(tmp.name, tmp.valid_hooks, 1310 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1307 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1308 tmp.hook_entry, tmp.underflow);
1309 if (ret != 0) 1311 if (ret != 0)
1310 goto free_newinfo; 1312 goto free_newinfo;
1311 1313
@@ -1318,27 +1320,16 @@ do_replace(struct net *net, void __user *user, unsigned int len)
1318 return 0; 1320 return 0;
1319 1321
1320 free_newinfo_untrans: 1322 free_newinfo_untrans:
1321 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); 1323 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1324 cleanup_entry(iter, net);
1322 free_newinfo: 1325 free_newinfo:
1323 xt_free_table_info(newinfo); 1326 xt_free_table_info(newinfo);
1324 return ret; 1327 return ret;
1325} 1328}
1326 1329
1327/* We're lazy, and add to the first CPU; overflow works its fey magic
1328 * and everything is OK. */
1329static int 1330static int
1330add_counter_to_entry(struct ipt_entry *e, 1331do_add_counters(struct net *net, const void __user *user,
1331 const struct xt_counters addme[], 1332 unsigned int len, int compat)
1332 unsigned int *i)
1333{
1334 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1335
1336 (*i)++;
1337 return 0;
1338}
1339
1340static int
1341do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
1342{ 1333{
1343 unsigned int i, curcpu; 1334 unsigned int i, curcpu;
1344 struct xt_counters_info tmp; 1335 struct xt_counters_info tmp;
@@ -1351,6 +1342,7 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat
1351 const struct xt_table_info *private; 1342 const struct xt_table_info *private;
1352 int ret = 0; 1343 int ret = 0;
1353 void *loc_cpu_entry; 1344 void *loc_cpu_entry;
1345 struct ipt_entry *iter;
1354#ifdef CONFIG_COMPAT 1346#ifdef CONFIG_COMPAT
1355 struct compat_xt_counters_info compat_tmp; 1347 struct compat_xt_counters_info compat_tmp;
1356 1348
@@ -1408,11 +1400,10 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat
1408 curcpu = smp_processor_id(); 1400 curcpu = smp_processor_id();
1409 loc_cpu_entry = private->entries[curcpu]; 1401 loc_cpu_entry = private->entries[curcpu];
1410 xt_info_wrlock(curcpu); 1402 xt_info_wrlock(curcpu);
1411 IPT_ENTRY_ITERATE(loc_cpu_entry, 1403 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1412 private->size, 1404 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1413 add_counter_to_entry, 1405 ++i;
1414 paddc, 1406 }
1415 &i);
1416 xt_info_wrunlock(curcpu); 1407 xt_info_wrunlock(curcpu);
1417 unlock_up_free: 1408 unlock_up_free:
1418 local_bh_enable(); 1409 local_bh_enable();
@@ -1440,45 +1431,40 @@ struct compat_ipt_replace {
1440static int 1431static int
1441compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr, 1432compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
1442 unsigned int *size, struct xt_counters *counters, 1433 unsigned int *size, struct xt_counters *counters,
1443 unsigned int *i) 1434 unsigned int i)
1444{ 1435{
1445 struct ipt_entry_target *t; 1436 struct ipt_entry_target *t;
1446 struct compat_ipt_entry __user *ce; 1437 struct compat_ipt_entry __user *ce;
1447 u_int16_t target_offset, next_offset; 1438 u_int16_t target_offset, next_offset;
1448 compat_uint_t origsize; 1439 compat_uint_t origsize;
1449 int ret; 1440 const struct xt_entry_match *ematch;
1441 int ret = 0;
1450 1442
1451 ret = -EFAULT;
1452 origsize = *size; 1443 origsize = *size;
1453 ce = (struct compat_ipt_entry __user *)*dstptr; 1444 ce = (struct compat_ipt_entry __user *)*dstptr;
1454 if (copy_to_user(ce, e, sizeof(struct ipt_entry))) 1445 if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
1455 goto out; 1446 copy_to_user(&ce->counters, &counters[i],
1456 1447 sizeof(counters[i])) != 0)
1457 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i]))) 1448 return -EFAULT;
1458 goto out;
1459 1449
1460 *dstptr += sizeof(struct compat_ipt_entry); 1450 *dstptr += sizeof(struct compat_ipt_entry);
1461 *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); 1451 *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1462 1452
1463 ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size); 1453 xt_ematch_foreach(ematch, e) {
1454 ret = xt_compat_match_to_user(ematch, dstptr, size);
1455 if (ret != 0)
1456 return ret;
1457 }
1464 target_offset = e->target_offset - (origsize - *size); 1458 target_offset = e->target_offset - (origsize - *size);
1465 if (ret)
1466 goto out;
1467 t = ipt_get_target(e); 1459 t = ipt_get_target(e);
1468 ret = xt_compat_target_to_user(t, dstptr, size); 1460 ret = xt_compat_target_to_user(t, dstptr, size);
1469 if (ret) 1461 if (ret)
1470 goto out; 1462 return ret;
1471 ret = -EFAULT;
1472 next_offset = e->next_offset - (origsize - *size); 1463 next_offset = e->next_offset - (origsize - *size);
1473 if (put_user(target_offset, &ce->target_offset)) 1464 if (put_user(target_offset, &ce->target_offset) != 0 ||
1474 goto out; 1465 put_user(next_offset, &ce->next_offset) != 0)
1475 if (put_user(next_offset, &ce->next_offset)) 1466 return -EFAULT;
1476 goto out;
1477
1478 (*i)++;
1479 return 0; 1467 return 0;
1480out:
1481 return ret;
1482} 1468}
1483 1469
1484static int 1470static int
@@ -1486,7 +1472,7 @@ compat_find_calc_match(struct ipt_entry_match *m,
1486 const char *name, 1472 const char *name,
1487 const struct ipt_ip *ip, 1473 const struct ipt_ip *ip,
1488 unsigned int hookmask, 1474 unsigned int hookmask,
1489 int *size, unsigned int *i) 1475 int *size)
1490{ 1476{
1491 struct xt_match *match; 1477 struct xt_match *match;
1492 1478
@@ -1500,47 +1486,32 @@ compat_find_calc_match(struct ipt_entry_match *m,
1500 } 1486 }
1501 m->u.kernel.match = match; 1487 m->u.kernel.match = match;
1502 *size += xt_compat_match_offset(match); 1488 *size += xt_compat_match_offset(match);
1503
1504 (*i)++;
1505 return 0;
1506}
1507
1508static int
1509compat_release_match(struct ipt_entry_match *m, unsigned int *i)
1510{
1511 if (i && (*i)-- == 0)
1512 return 1;
1513
1514 module_put(m->u.kernel.match->me);
1515 return 0; 1489 return 0;
1516} 1490}
1517 1491
1518static int 1492static void compat_release_entry(struct compat_ipt_entry *e)
1519compat_release_entry(struct compat_ipt_entry *e, unsigned int *i)
1520{ 1493{
1521 struct ipt_entry_target *t; 1494 struct ipt_entry_target *t;
1522 1495 struct xt_entry_match *ematch;
1523 if (i && (*i)-- == 0)
1524 return 1;
1525 1496
1526 /* Cleanup all matches */ 1497 /* Cleanup all matches */
1527 COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, NULL); 1498 xt_ematch_foreach(ematch, e)
1499 module_put(ematch->u.kernel.match->me);
1528 t = compat_ipt_get_target(e); 1500 t = compat_ipt_get_target(e);
1529 module_put(t->u.kernel.target->me); 1501 module_put(t->u.kernel.target->me);
1530 return 0;
1531} 1502}
1532 1503
1533static int 1504static int
1534check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, 1505check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
1535 struct xt_table_info *newinfo, 1506 struct xt_table_info *newinfo,
1536 unsigned int *size, 1507 unsigned int *size,
1537 unsigned char *base, 1508 const unsigned char *base,
1538 unsigned char *limit, 1509 const unsigned char *limit,
1539 unsigned int *hook_entries, 1510 const unsigned int *hook_entries,
1540 unsigned int *underflows, 1511 const unsigned int *underflows,
1541 unsigned int *i,
1542 const char *name) 1512 const char *name)
1543{ 1513{
1514 struct xt_entry_match *ematch;
1544 struct ipt_entry_target *t; 1515 struct ipt_entry_target *t;
1545 struct xt_target *target; 1516 struct xt_target *target;
1546 unsigned int entry_offset; 1517 unsigned int entry_offset;
@@ -1569,10 +1540,13 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
1569 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); 1540 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1570 entry_offset = (void *)e - (void *)base; 1541 entry_offset = (void *)e - (void *)base;
1571 j = 0; 1542 j = 0;
1572 ret = COMPAT_IPT_MATCH_ITERATE(e, compat_find_calc_match, name, 1543 xt_ematch_foreach(ematch, e) {
1573 &e->ip, e->comefrom, &off, &j); 1544 ret = compat_find_calc_match(ematch, name,
1574 if (ret != 0) 1545 &e->ip, e->comefrom, &off);
1575 goto release_matches; 1546 if (ret != 0)
1547 goto release_matches;
1548 ++j;
1549 }
1576 1550
1577 t = compat_ipt_get_target(e); 1551 t = compat_ipt_get_target(e);
1578 target = try_then_request_module(xt_find_target(AF_INET, 1552 target = try_then_request_module(xt_find_target(AF_INET,
@@ -1604,14 +1578,16 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
1604 /* Clear counters and comefrom */ 1578 /* Clear counters and comefrom */
1605 memset(&e->counters, 0, sizeof(e->counters)); 1579 memset(&e->counters, 0, sizeof(e->counters));
1606 e->comefrom = 0; 1580 e->comefrom = 0;
1607
1608 (*i)++;
1609 return 0; 1581 return 0;
1610 1582
1611out: 1583out:
1612 module_put(t->u.kernel.target->me); 1584 module_put(t->u.kernel.target->me);
1613release_matches: 1585release_matches:
1614 IPT_MATCH_ITERATE(e, compat_release_match, &j); 1586 xt_ematch_foreach(ematch, e) {
1587 if (j-- == 0)
1588 break;
1589 module_put(ematch->u.kernel.match->me);
1590 }
1615 return ret; 1591 return ret;
1616} 1592}
1617 1593
@@ -1625,6 +1601,7 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
1625 struct ipt_entry *de; 1601 struct ipt_entry *de;
1626 unsigned int origsize; 1602 unsigned int origsize;
1627 int ret, h; 1603 int ret, h;
1604 struct xt_entry_match *ematch;
1628 1605
1629 ret = 0; 1606 ret = 0;
1630 origsize = *size; 1607 origsize = *size;
@@ -1635,10 +1612,11 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
1635 *dstptr += sizeof(struct ipt_entry); 1612 *dstptr += sizeof(struct ipt_entry);
1636 *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); 1613 *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1637 1614
1638 ret = COMPAT_IPT_MATCH_ITERATE(e, xt_compat_match_from_user, 1615 xt_ematch_foreach(ematch, e) {
1639 dstptr, size); 1616 ret = xt_compat_match_from_user(ematch, dstptr, size);
1640 if (ret) 1617 if (ret != 0)
1641 return ret; 1618 return ret;
1619 }
1642 de->target_offset = e->target_offset - (origsize - *size); 1620 de->target_offset = e->target_offset - (origsize - *size);
1643 t = compat_ipt_get_target(e); 1621 t = compat_ipt_get_target(e);
1644 target = t->u.kernel.target; 1622 target = t->u.kernel.target;
@@ -1655,36 +1633,43 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
1655} 1633}
1656 1634
1657static int 1635static int
1658compat_check_entry(struct ipt_entry *e, const char *name, 1636compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
1659 unsigned int *i)
1660{ 1637{
1638 struct xt_entry_match *ematch;
1661 struct xt_mtchk_param mtpar; 1639 struct xt_mtchk_param mtpar;
1662 unsigned int j; 1640 unsigned int j;
1663 int ret; 1641 int ret = 0;
1664 1642
1665 j = 0; 1643 j = 0;
1644 mtpar.net = net;
1666 mtpar.table = name; 1645 mtpar.table = name;
1667 mtpar.entryinfo = &e->ip; 1646 mtpar.entryinfo = &e->ip;
1668 mtpar.hook_mask = e->comefrom; 1647 mtpar.hook_mask = e->comefrom;
1669 mtpar.family = NFPROTO_IPV4; 1648 mtpar.family = NFPROTO_IPV4;
1670 ret = IPT_MATCH_ITERATE(e, check_match, &mtpar, &j); 1649 xt_ematch_foreach(ematch, e) {
1671 if (ret) 1650 ret = check_match(ematch, &mtpar);
1672 goto cleanup_matches; 1651 if (ret != 0)
1652 goto cleanup_matches;
1653 ++j;
1654 }
1673 1655
1674 ret = check_target(e, name); 1656 ret = check_target(e, net, name);
1675 if (ret) 1657 if (ret)
1676 goto cleanup_matches; 1658 goto cleanup_matches;
1677
1678 (*i)++;
1679 return 0; 1659 return 0;
1680 1660
1681 cleanup_matches: 1661 cleanup_matches:
1682 IPT_MATCH_ITERATE(e, cleanup_match, &j); 1662 xt_ematch_foreach(ematch, e) {
1663 if (j-- == 0)
1664 break;
1665 cleanup_match(ematch, net);
1666 }
1683 return ret; 1667 return ret;
1684} 1668}
1685 1669
1686static int 1670static int
1687translate_compat_table(const char *name, 1671translate_compat_table(struct net *net,
1672 const char *name,
1688 unsigned int valid_hooks, 1673 unsigned int valid_hooks,
1689 struct xt_table_info **pinfo, 1674 struct xt_table_info **pinfo,
1690 void **pentry0, 1675 void **pentry0,
@@ -1696,6 +1681,8 @@ translate_compat_table(const char *name,
1696 unsigned int i, j; 1681 unsigned int i, j;
1697 struct xt_table_info *newinfo, *info; 1682 struct xt_table_info *newinfo, *info;
1698 void *pos, *entry0, *entry1; 1683 void *pos, *entry0, *entry1;
1684 struct compat_ipt_entry *iter0;
1685 struct ipt_entry *iter1;
1699 unsigned int size; 1686 unsigned int size;
1700 int ret; 1687 int ret;
1701 1688
@@ -1714,13 +1701,17 @@ translate_compat_table(const char *name,
1714 j = 0; 1701 j = 0;
1715 xt_compat_lock(AF_INET); 1702 xt_compat_lock(AF_INET);
1716 /* Walk through entries, checking offsets. */ 1703 /* Walk through entries, checking offsets. */
1717 ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, 1704 xt_entry_foreach(iter0, entry0, total_size) {
1718 check_compat_entry_size_and_hooks, 1705 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1719 info, &size, entry0, 1706 entry0,
1720 entry0 + total_size, 1707 entry0 + total_size,
1721 hook_entries, underflows, &j, name); 1708 hook_entries,
1722 if (ret != 0) 1709 underflows,
1723 goto out_unlock; 1710 name);
1711 if (ret != 0)
1712 goto out_unlock;
1713 ++j;
1714 }
1724 1715
1725 ret = -EINVAL; 1716 ret = -EINVAL;
1726 if (j != number) { 1717 if (j != number) {
@@ -1759,9 +1750,12 @@ translate_compat_table(const char *name,
1759 entry1 = newinfo->entries[raw_smp_processor_id()]; 1750 entry1 = newinfo->entries[raw_smp_processor_id()];
1760 pos = entry1; 1751 pos = entry1;
1761 size = total_size; 1752 size = total_size;
1762 ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, 1753 xt_entry_foreach(iter0, entry0, total_size) {
1763 compat_copy_entry_from_user, 1754 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1764 &pos, &size, name, newinfo, entry1); 1755 name, newinfo, entry1);
1756 if (ret != 0)
1757 break;
1758 }
1765 xt_compat_flush_offsets(AF_INET); 1759 xt_compat_flush_offsets(AF_INET);
1766 xt_compat_unlock(AF_INET); 1760 xt_compat_unlock(AF_INET);
1767 if (ret) 1761 if (ret)
@@ -1772,13 +1766,32 @@ translate_compat_table(const char *name,
1772 goto free_newinfo; 1766 goto free_newinfo;
1773 1767
1774 i = 0; 1768 i = 0;
1775 ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry, 1769 xt_entry_foreach(iter1, entry1, newinfo->size) {
1776 name, &i); 1770 ret = compat_check_entry(iter1, net, name);
1771 if (ret != 0)
1772 break;
1773 ++i;
1774 }
1777 if (ret) { 1775 if (ret) {
1776 /*
1777 * The first i matches need cleanup_entry (calls ->destroy)
1778 * because they had called ->check already. The other j-i
1779 * entries need only release.
1780 */
1781 int skip = i;
1778 j -= i; 1782 j -= i;
1779 COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i, 1783 xt_entry_foreach(iter0, entry0, newinfo->size) {
1780 compat_release_entry, &j); 1784 if (skip-- > 0)
1781 IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i); 1785 continue;
1786 if (j-- == 0)
1787 break;
1788 compat_release_entry(iter0);
1789 }
1790 xt_entry_foreach(iter1, entry1, newinfo->size) {
1791 if (i-- == 0)
1792 break;
1793 cleanup_entry(iter1, net);
1794 }
1782 xt_free_table_info(newinfo); 1795 xt_free_table_info(newinfo);
1783 return ret; 1796 return ret;
1784 } 1797 }
@@ -1796,7 +1809,11 @@ translate_compat_table(const char *name,
1796free_newinfo: 1809free_newinfo:
1797 xt_free_table_info(newinfo); 1810 xt_free_table_info(newinfo);
1798out: 1811out:
1799 COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j); 1812 xt_entry_foreach(iter0, entry0, total_size) {
1813 if (j-- == 0)
1814 break;
1815 compat_release_entry(iter0);
1816 }
1800 return ret; 1817 return ret;
1801out_unlock: 1818out_unlock:
1802 xt_compat_flush_offsets(AF_INET); 1819 xt_compat_flush_offsets(AF_INET);
@@ -1811,6 +1828,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
1811 struct compat_ipt_replace tmp; 1828 struct compat_ipt_replace tmp;
1812 struct xt_table_info *newinfo; 1829 struct xt_table_info *newinfo;
1813 void *loc_cpu_entry; 1830 void *loc_cpu_entry;
1831 struct ipt_entry *iter;
1814 1832
1815 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1833 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1816 return -EFAULT; 1834 return -EFAULT;
@@ -1833,7 +1851,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
1833 goto free_newinfo; 1851 goto free_newinfo;
1834 } 1852 }
1835 1853
1836 ret = translate_compat_table(tmp.name, tmp.valid_hooks, 1854 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1837 &newinfo, &loc_cpu_entry, tmp.size, 1855 &newinfo, &loc_cpu_entry, tmp.size,
1838 tmp.num_entries, tmp.hook_entry, 1856 tmp.num_entries, tmp.hook_entry,
1839 tmp.underflow); 1857 tmp.underflow);
@@ -1849,7 +1867,8 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
1849 return 0; 1867 return 0;
1850 1868
1851 free_newinfo_untrans: 1869 free_newinfo_untrans:
1852 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); 1870 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1871 cleanup_entry(iter, net);
1853 free_newinfo: 1872 free_newinfo:
1854 xt_free_table_info(newinfo); 1873 xt_free_table_info(newinfo);
1855 return ret; 1874 return ret;
@@ -1898,6 +1917,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1898 int ret = 0; 1917 int ret = 0;
1899 const void *loc_cpu_entry; 1918 const void *loc_cpu_entry;
1900 unsigned int i = 0; 1919 unsigned int i = 0;
1920 struct ipt_entry *iter;
1901 1921
1902 counters = alloc_counters(table); 1922 counters = alloc_counters(table);
1903 if (IS_ERR(counters)) 1923 if (IS_ERR(counters))
@@ -1910,9 +1930,12 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1910 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1930 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1911 pos = userptr; 1931 pos = userptr;
1912 size = total_size; 1932 size = total_size;
1913 ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size, 1933 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1914 compat_copy_entry_to_user, 1934 ret = compat_copy_entry_to_user(iter, &pos,
1915 &pos, &size, counters, &i); 1935 &size, counters, i++);
1936 if (ret != 0)
1937 break;
1938 }
1916 1939
1917 vfree(counters); 1940 vfree(counters);
1918 return ret; 1941 return ret;
@@ -2086,11 +2109,7 @@ struct xt_table *ipt_register_table(struct net *net,
2086 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 2109 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2087 memcpy(loc_cpu_entry, repl->entries, repl->size); 2110 memcpy(loc_cpu_entry, repl->entries, repl->size);
2088 2111
2089 ret = translate_table(table->name, table->valid_hooks, 2112 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2090 newinfo, loc_cpu_entry, repl->size,
2091 repl->num_entries,
2092 repl->hook_entry,
2093 repl->underflow);
2094 if (ret != 0) 2113 if (ret != 0)
2095 goto out_free; 2114 goto out_free;
2096 2115
@@ -2108,17 +2127,19 @@ out:
2108 return ERR_PTR(ret); 2127 return ERR_PTR(ret);
2109} 2128}
2110 2129
2111void ipt_unregister_table(struct xt_table *table) 2130void ipt_unregister_table(struct net *net, struct xt_table *table)
2112{ 2131{
2113 struct xt_table_info *private; 2132 struct xt_table_info *private;
2114 void *loc_cpu_entry; 2133 void *loc_cpu_entry;
2115 struct module *table_owner = table->me; 2134 struct module *table_owner = table->me;
2135 struct ipt_entry *iter;
2116 2136
2117 private = xt_unregister_table(table); 2137 private = xt_unregister_table(table);
2118 2138
2119 /* Decrease module usage counts and free resources */ 2139 /* Decrease module usage counts and free resources */
2120 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 2140 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2121 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL); 2141 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2142 cleanup_entry(iter, net);
2122 if (private->number > private->initial_entries) 2143 if (private->number > private->initial_entries)
2123 module_put(table_owner); 2144 module_put(table_owner);
2124 xt_free_table_info(private); 2145 xt_free_table_info(private);
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 40ca2d240abb..0886f96c736b 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -560,8 +560,7 @@ struct clusterip_seq_position {
560 560
561static void *clusterip_seq_start(struct seq_file *s, loff_t *pos) 561static void *clusterip_seq_start(struct seq_file *s, loff_t *pos)
562{ 562{
563 const struct proc_dir_entry *pde = s->private; 563 struct clusterip_config *c = s->private;
564 struct clusterip_config *c = pde->data;
565 unsigned int weight; 564 unsigned int weight;
566 u_int32_t local_nodes; 565 u_int32_t local_nodes;
567 struct clusterip_seq_position *idx; 566 struct clusterip_seq_position *idx;
@@ -632,10 +631,9 @@ static int clusterip_proc_open(struct inode *inode, struct file *file)
632 631
633 if (!ret) { 632 if (!ret) {
634 struct seq_file *sf = file->private_data; 633 struct seq_file *sf = file->private_data;
635 struct proc_dir_entry *pde = PDE(inode); 634 struct clusterip_config *c = PDE(inode)->data;
636 struct clusterip_config *c = pde->data;
637 635
638 sf->private = pde; 636 sf->private = c;
639 637
640 clusterip_config_get(c); 638 clusterip_config_get(c);
641 } 639 }
@@ -645,8 +643,7 @@ static int clusterip_proc_open(struct inode *inode, struct file *file)
645 643
646static int clusterip_proc_release(struct inode *inode, struct file *file) 644static int clusterip_proc_release(struct inode *inode, struct file *file)
647{ 645{
648 struct proc_dir_entry *pde = PDE(inode); 646 struct clusterip_config *c = PDE(inode)->data;
649 struct clusterip_config *c = pde->data;
650 int ret; 647 int ret;
651 648
652 ret = seq_release(inode, file); 649 ret = seq_release(inode, file);
@@ -660,10 +657,9 @@ static int clusterip_proc_release(struct inode *inode, struct file *file)
660static ssize_t clusterip_proc_write(struct file *file, const char __user *input, 657static ssize_t clusterip_proc_write(struct file *file, const char __user *input,
661 size_t size, loff_t *ofs) 658 size_t size, loff_t *ofs)
662{ 659{
660 struct clusterip_config *c = PDE(file->f_path.dentry->d_inode)->data;
663#define PROC_WRITELEN 10 661#define PROC_WRITELEN 10
664 char buffer[PROC_WRITELEN+1]; 662 char buffer[PROC_WRITELEN+1];
665 const struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
666 struct clusterip_config *c = pde->data;
667 unsigned long nodenum; 663 unsigned long nodenum;
668 664
669 if (copy_from_user(buffer, input, PROC_WRITELEN)) 665 if (copy_from_user(buffer, input, PROC_WRITELEN))
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 399061c3fd7d..09a5d3f7cc41 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -338,7 +338,7 @@ struct compat_ipt_ulog_info {
338 char prefix[ULOG_PREFIX_LEN]; 338 char prefix[ULOG_PREFIX_LEN];
339}; 339};
340 340
341static void ulog_tg_compat_from_user(void *dst, void *src) 341static void ulog_tg_compat_from_user(void *dst, const void *src)
342{ 342{
343 const struct compat_ipt_ulog_info *cl = src; 343 const struct compat_ipt_ulog_info *cl = src;
344 struct ipt_ulog_info l = { 344 struct ipt_ulog_info l = {
@@ -351,7 +351,7 @@ static void ulog_tg_compat_from_user(void *dst, void *src)
351 memcpy(dst, &l, sizeof(l)); 351 memcpy(dst, &l, sizeof(l));
352} 352}
353 353
354static int ulog_tg_compat_to_user(void __user *dst, void *src) 354static int ulog_tg_compat_to_user(void __user *dst, const void *src)
355{ 355{
356 const struct ipt_ulog_info *l = src; 356 const struct ipt_ulog_info *l = src;
357 struct compat_ipt_ulog_info cl = { 357 struct compat_ipt_ulog_info cl = {
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index df566cbd68e5..c8dc9800d620 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -23,104 +23,32 @@ MODULE_DESCRIPTION("iptables filter table");
23 (1 << NF_INET_FORWARD) | \ 23 (1 << NF_INET_FORWARD) | \
24 (1 << NF_INET_LOCAL_OUT)) 24 (1 << NF_INET_LOCAL_OUT))
25 25
26static struct
27{
28 struct ipt_replace repl;
29 struct ipt_standard entries[3];
30 struct ipt_error term;
31} initial_table __net_initdata = {
32 .repl = {
33 .name = "filter",
34 .valid_hooks = FILTER_VALID_HOOKS,
35 .num_entries = 4,
36 .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
37 .hook_entry = {
38 [NF_INET_LOCAL_IN] = 0,
39 [NF_INET_FORWARD] = sizeof(struct ipt_standard),
40 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
41 },
42 .underflow = {
43 [NF_INET_LOCAL_IN] = 0,
44 [NF_INET_FORWARD] = sizeof(struct ipt_standard),
45 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
46 },
47 },
48 .entries = {
49 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
50 IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
51 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
52 },
53 .term = IPT_ERROR_INIT, /* ERROR */
54};
55
56static const struct xt_table packet_filter = { 26static const struct xt_table packet_filter = {
57 .name = "filter", 27 .name = "filter",
58 .valid_hooks = FILTER_VALID_HOOKS, 28 .valid_hooks = FILTER_VALID_HOOKS,
59 .me = THIS_MODULE, 29 .me = THIS_MODULE,
60 .af = NFPROTO_IPV4, 30 .af = NFPROTO_IPV4,
31 .priority = NF_IP_PRI_FILTER,
61}; 32};
62 33
63/* The work comes in here from netfilter.c. */
64static unsigned int
65ipt_local_in_hook(unsigned int hook,
66 struct sk_buff *skb,
67 const struct net_device *in,
68 const struct net_device *out,
69 int (*okfn)(struct sk_buff *))
70{
71 return ipt_do_table(skb, hook, in, out,
72 dev_net(in)->ipv4.iptable_filter);
73}
74
75static unsigned int 34static unsigned int
76ipt_hook(unsigned int hook, 35iptable_filter_hook(unsigned int hook, struct sk_buff *skb,
77 struct sk_buff *skb, 36 const struct net_device *in, const struct net_device *out,
78 const struct net_device *in, 37 int (*okfn)(struct sk_buff *))
79 const struct net_device *out,
80 int (*okfn)(struct sk_buff *))
81{ 38{
82 return ipt_do_table(skb, hook, in, out, 39 const struct net *net;
83 dev_net(in)->ipv4.iptable_filter);
84}
85 40
86static unsigned int 41 if (hook == NF_INET_LOCAL_OUT &&
87ipt_local_out_hook(unsigned int hook, 42 (skb->len < sizeof(struct iphdr) ||
88 struct sk_buff *skb, 43 ip_hdrlen(skb) < sizeof(struct iphdr)))
89 const struct net_device *in, 44 /* root is playing with raw sockets. */
90 const struct net_device *out,
91 int (*okfn)(struct sk_buff *))
92{
93 /* root is playing with raw sockets. */
94 if (skb->len < sizeof(struct iphdr) ||
95 ip_hdrlen(skb) < sizeof(struct iphdr))
96 return NF_ACCEPT; 45 return NF_ACCEPT;
97 return ipt_do_table(skb, hook, in, out, 46
98 dev_net(out)->ipv4.iptable_filter); 47 net = dev_net((in != NULL) ? in : out);
48 return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_filter);
99} 49}
100 50
101static struct nf_hook_ops ipt_ops[] __read_mostly = { 51static struct nf_hook_ops *filter_ops __read_mostly;
102 {
103 .hook = ipt_local_in_hook,
104 .owner = THIS_MODULE,
105 .pf = NFPROTO_IPV4,
106 .hooknum = NF_INET_LOCAL_IN,
107 .priority = NF_IP_PRI_FILTER,
108 },
109 {
110 .hook = ipt_hook,
111 .owner = THIS_MODULE,
112 .pf = NFPROTO_IPV4,
113 .hooknum = NF_INET_FORWARD,
114 .priority = NF_IP_PRI_FILTER,
115 },
116 {
117 .hook = ipt_local_out_hook,
118 .owner = THIS_MODULE,
119 .pf = NFPROTO_IPV4,
120 .hooknum = NF_INET_LOCAL_OUT,
121 .priority = NF_IP_PRI_FILTER,
122 },
123};
124 52
125/* Default to forward because I got too much mail already. */ 53/* Default to forward because I got too much mail already. */
126static int forward = NF_ACCEPT; 54static int forward = NF_ACCEPT;
@@ -128,9 +56,18 @@ module_param(forward, bool, 0000);
128 56
129static int __net_init iptable_filter_net_init(struct net *net) 57static int __net_init iptable_filter_net_init(struct net *net)
130{ 58{
131 /* Register table */ 59 struct ipt_replace *repl;
60
61 repl = ipt_alloc_initial_table(&packet_filter);
62 if (repl == NULL)
63 return -ENOMEM;
64 /* Entry 1 is the FORWARD hook */
65 ((struct ipt_standard *)repl->entries)[1].target.verdict =
66 -forward - 1;
67
132 net->ipv4.iptable_filter = 68 net->ipv4.iptable_filter =
133 ipt_register_table(net, &packet_filter, &initial_table.repl); 69 ipt_register_table(net, &packet_filter, repl);
70 kfree(repl);
134 if (IS_ERR(net->ipv4.iptable_filter)) 71 if (IS_ERR(net->ipv4.iptable_filter))
135 return PTR_ERR(net->ipv4.iptable_filter); 72 return PTR_ERR(net->ipv4.iptable_filter);
136 return 0; 73 return 0;
@@ -138,7 +75,7 @@ static int __net_init iptable_filter_net_init(struct net *net)
138 75
139static void __net_exit iptable_filter_net_exit(struct net *net) 76static void __net_exit iptable_filter_net_exit(struct net *net)
140{ 77{
141 ipt_unregister_table(net->ipv4.iptable_filter); 78 ipt_unregister_table(net, net->ipv4.iptable_filter);
142} 79}
143 80
144static struct pernet_operations iptable_filter_net_ops = { 81static struct pernet_operations iptable_filter_net_ops = {
@@ -155,17 +92,16 @@ static int __init iptable_filter_init(void)
155 return -EINVAL; 92 return -EINVAL;
156 } 93 }
157 94
158 /* Entry 1 is the FORWARD hook */
159 initial_table.entries[1].target.verdict = -forward - 1;
160
161 ret = register_pernet_subsys(&iptable_filter_net_ops); 95 ret = register_pernet_subsys(&iptable_filter_net_ops);
162 if (ret < 0) 96 if (ret < 0)
163 return ret; 97 return ret;
164 98
165 /* Register hooks */ 99 /* Register hooks */
166 ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 100 filter_ops = xt_hook_link(&packet_filter, iptable_filter_hook);
167 if (ret < 0) 101 if (IS_ERR(filter_ops)) {
102 ret = PTR_ERR(filter_ops);
168 goto cleanup_table; 103 goto cleanup_table;
104 }
169 105
170 return ret; 106 return ret;
171 107
@@ -176,7 +112,7 @@ static int __init iptable_filter_init(void)
176 112
177static void __exit iptable_filter_fini(void) 113static void __exit iptable_filter_fini(void)
178{ 114{
179 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 115 xt_hook_unlink(&packet_filter, filter_ops);
180 unregister_pernet_subsys(&iptable_filter_net_ops); 116 unregister_pernet_subsys(&iptable_filter_net_ops);
181} 117}
182 118
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index fae78c3076c4..b9b83464cbf4 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -27,101 +27,16 @@ MODULE_DESCRIPTION("iptables mangle table");
27 (1 << NF_INET_LOCAL_OUT) | \ 27 (1 << NF_INET_LOCAL_OUT) | \
28 (1 << NF_INET_POST_ROUTING)) 28 (1 << NF_INET_POST_ROUTING))
29 29
30/* Ouch - five different hooks? Maybe this should be a config option..... -- BC */
31static const struct
32{
33 struct ipt_replace repl;
34 struct ipt_standard entries[5];
35 struct ipt_error term;
36} initial_table __net_initdata = {
37 .repl = {
38 .name = "mangle",
39 .valid_hooks = MANGLE_VALID_HOOKS,
40 .num_entries = 6,
41 .size = sizeof(struct ipt_standard) * 5 + sizeof(struct ipt_error),
42 .hook_entry = {
43 [NF_INET_PRE_ROUTING] = 0,
44 [NF_INET_LOCAL_IN] = sizeof(struct ipt_standard),
45 [NF_INET_FORWARD] = sizeof(struct ipt_standard) * 2,
46 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
47 [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard) * 4,
48 },
49 .underflow = {
50 [NF_INET_PRE_ROUTING] = 0,
51 [NF_INET_LOCAL_IN] = sizeof(struct ipt_standard),
52 [NF_INET_FORWARD] = sizeof(struct ipt_standard) * 2,
53 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
54 [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard) * 4,
55 },
56 },
57 .entries = {
58 IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
59 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
60 IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
61 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
62 IPT_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
63 },
64 .term = IPT_ERROR_INIT, /* ERROR */
65};
66
67static const struct xt_table packet_mangler = { 30static const struct xt_table packet_mangler = {
68 .name = "mangle", 31 .name = "mangle",
69 .valid_hooks = MANGLE_VALID_HOOKS, 32 .valid_hooks = MANGLE_VALID_HOOKS,
70 .me = THIS_MODULE, 33 .me = THIS_MODULE,
71 .af = NFPROTO_IPV4, 34 .af = NFPROTO_IPV4,
35 .priority = NF_IP_PRI_MANGLE,
72}; 36};
73 37
74/* The work comes in here from netfilter.c. */
75static unsigned int
76ipt_pre_routing_hook(unsigned int hook,
77 struct sk_buff *skb,
78 const struct net_device *in,
79 const struct net_device *out,
80 int (*okfn)(struct sk_buff *))
81{
82 return ipt_do_table(skb, hook, in, out,
83 dev_net(in)->ipv4.iptable_mangle);
84}
85
86static unsigned int
87ipt_post_routing_hook(unsigned int hook,
88 struct sk_buff *skb,
89 const struct net_device *in,
90 const struct net_device *out,
91 int (*okfn)(struct sk_buff *))
92{
93 return ipt_do_table(skb, hook, in, out,
94 dev_net(out)->ipv4.iptable_mangle);
95}
96
97static unsigned int
98ipt_local_in_hook(unsigned int hook,
99 struct sk_buff *skb,
100 const struct net_device *in,
101 const struct net_device *out,
102 int (*okfn)(struct sk_buff *))
103{
104 return ipt_do_table(skb, hook, in, out,
105 dev_net(in)->ipv4.iptable_mangle);
106}
107
108static unsigned int
109ipt_forward_hook(unsigned int hook,
110 struct sk_buff *skb,
111 const struct net_device *in,
112 const struct net_device *out,
113 int (*okfn)(struct sk_buff *))
114{
115 return ipt_do_table(skb, hook, in, out,
116 dev_net(in)->ipv4.iptable_mangle);
117}
118
119static unsigned int 38static unsigned int
120ipt_local_hook(unsigned int hook, 39ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
121 struct sk_buff *skb,
122 const struct net_device *in,
123 const struct net_device *out,
124 int (*okfn)(struct sk_buff *))
125{ 40{
126 unsigned int ret; 41 unsigned int ret;
127 const struct iphdr *iph; 42 const struct iphdr *iph;
@@ -141,7 +56,7 @@ ipt_local_hook(unsigned int hook,
141 daddr = iph->daddr; 56 daddr = iph->daddr;
142 tos = iph->tos; 57 tos = iph->tos;
143 58
144 ret = ipt_do_table(skb, hook, in, out, 59 ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
145 dev_net(out)->ipv4.iptable_mangle); 60 dev_net(out)->ipv4.iptable_mangle);
146 /* Reroute for ANY change. */ 61 /* Reroute for ANY change. */
147 if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) { 62 if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) {
@@ -158,49 +73,36 @@ ipt_local_hook(unsigned int hook,
158 return ret; 73 return ret;
159} 74}
160 75
161static struct nf_hook_ops ipt_ops[] __read_mostly = { 76/* The work comes in here from netfilter.c. */
162 { 77static unsigned int
163 .hook = ipt_pre_routing_hook, 78iptable_mangle_hook(unsigned int hook,
164 .owner = THIS_MODULE, 79 struct sk_buff *skb,
165 .pf = NFPROTO_IPV4, 80 const struct net_device *in,
166 .hooknum = NF_INET_PRE_ROUTING, 81 const struct net_device *out,
167 .priority = NF_IP_PRI_MANGLE, 82 int (*okfn)(struct sk_buff *))
168 }, 83{
169 { 84 if (hook == NF_INET_LOCAL_OUT)
170 .hook = ipt_local_in_hook, 85 return ipt_mangle_out(skb, out);
171 .owner = THIS_MODULE, 86 if (hook == NF_INET_POST_ROUTING)
172 .pf = NFPROTO_IPV4, 87 return ipt_do_table(skb, hook, in, out,
173 .hooknum = NF_INET_LOCAL_IN, 88 dev_net(out)->ipv4.iptable_mangle);
174 .priority = NF_IP_PRI_MANGLE, 89 /* PREROUTING/INPUT/FORWARD: */
175 }, 90 return ipt_do_table(skb, hook, in, out,
176 { 91 dev_net(in)->ipv4.iptable_mangle);
177 .hook = ipt_forward_hook, 92}
178 .owner = THIS_MODULE, 93
179 .pf = NFPROTO_IPV4, 94static struct nf_hook_ops *mangle_ops __read_mostly;
180 .hooknum = NF_INET_FORWARD,
181 .priority = NF_IP_PRI_MANGLE,
182 },
183 {
184 .hook = ipt_local_hook,
185 .owner = THIS_MODULE,
186 .pf = NFPROTO_IPV4,
187 .hooknum = NF_INET_LOCAL_OUT,
188 .priority = NF_IP_PRI_MANGLE,
189 },
190 {
191 .hook = ipt_post_routing_hook,
192 .owner = THIS_MODULE,
193 .pf = NFPROTO_IPV4,
194 .hooknum = NF_INET_POST_ROUTING,
195 .priority = NF_IP_PRI_MANGLE,
196 },
197};
198 95
199static int __net_init iptable_mangle_net_init(struct net *net) 96static int __net_init iptable_mangle_net_init(struct net *net)
200{ 97{
201 /* Register table */ 98 struct ipt_replace *repl;
99
100 repl = ipt_alloc_initial_table(&packet_mangler);
101 if (repl == NULL)
102 return -ENOMEM;
202 net->ipv4.iptable_mangle = 103 net->ipv4.iptable_mangle =
203 ipt_register_table(net, &packet_mangler, &initial_table.repl); 104 ipt_register_table(net, &packet_mangler, repl);
105 kfree(repl);
204 if (IS_ERR(net->ipv4.iptable_mangle)) 106 if (IS_ERR(net->ipv4.iptable_mangle))
205 return PTR_ERR(net->ipv4.iptable_mangle); 107 return PTR_ERR(net->ipv4.iptable_mangle);
206 return 0; 108 return 0;
@@ -208,7 +110,7 @@ static int __net_init iptable_mangle_net_init(struct net *net)
208 110
209static void __net_exit iptable_mangle_net_exit(struct net *net) 111static void __net_exit iptable_mangle_net_exit(struct net *net)
210{ 112{
211 ipt_unregister_table(net->ipv4.iptable_mangle); 113 ipt_unregister_table(net, net->ipv4.iptable_mangle);
212} 114}
213 115
214static struct pernet_operations iptable_mangle_net_ops = { 116static struct pernet_operations iptable_mangle_net_ops = {
@@ -225,9 +127,11 @@ static int __init iptable_mangle_init(void)
225 return ret; 127 return ret;
226 128
227 /* Register hooks */ 129 /* Register hooks */
228 ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 130 mangle_ops = xt_hook_link(&packet_mangler, iptable_mangle_hook);
229 if (ret < 0) 131 if (IS_ERR(mangle_ops)) {
132 ret = PTR_ERR(mangle_ops);
230 goto cleanup_table; 133 goto cleanup_table;
134 }
231 135
232 return ret; 136 return ret;
233 137
@@ -238,7 +142,7 @@ static int __init iptable_mangle_init(void)
238 142
239static void __exit iptable_mangle_fini(void) 143static void __exit iptable_mangle_fini(void)
240{ 144{
241 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 145 xt_hook_unlink(&packet_mangler, mangle_ops);
242 unregister_pernet_subsys(&iptable_mangle_net_ops); 146 unregister_pernet_subsys(&iptable_mangle_net_ops);
243} 147}
244 148
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 993edc23be09..06fb9d11953c 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -9,90 +9,44 @@
9 9
10#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT)) 10#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT))
11 11
12static const struct
13{
14 struct ipt_replace repl;
15 struct ipt_standard entries[2];
16 struct ipt_error term;
17} initial_table __net_initdata = {
18 .repl = {
19 .name = "raw",
20 .valid_hooks = RAW_VALID_HOOKS,
21 .num_entries = 3,
22 .size = sizeof(struct ipt_standard) * 2 + sizeof(struct ipt_error),
23 .hook_entry = {
24 [NF_INET_PRE_ROUTING] = 0,
25 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard)
26 },
27 .underflow = {
28 [NF_INET_PRE_ROUTING] = 0,
29 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard)
30 },
31 },
32 .entries = {
33 IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
34 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
35 },
36 .term = IPT_ERROR_INIT, /* ERROR */
37};
38
39static const struct xt_table packet_raw = { 12static const struct xt_table packet_raw = {
40 .name = "raw", 13 .name = "raw",
41 .valid_hooks = RAW_VALID_HOOKS, 14 .valid_hooks = RAW_VALID_HOOKS,
42 .me = THIS_MODULE, 15 .me = THIS_MODULE,
43 .af = NFPROTO_IPV4, 16 .af = NFPROTO_IPV4,
17 .priority = NF_IP_PRI_RAW,
44}; 18};
45 19
46/* The work comes in here from netfilter.c. */ 20/* The work comes in here from netfilter.c. */
47static unsigned int 21static unsigned int
48ipt_hook(unsigned int hook, 22iptable_raw_hook(unsigned int hook, struct sk_buff *skb,
49 struct sk_buff *skb, 23 const struct net_device *in, const struct net_device *out,
50 const struct net_device *in, 24 int (*okfn)(struct sk_buff *))
51 const struct net_device *out,
52 int (*okfn)(struct sk_buff *))
53{ 25{
54 return ipt_do_table(skb, hook, in, out, 26 const struct net *net;
55 dev_net(in)->ipv4.iptable_raw);
56}
57 27
58static unsigned int 28 if (hook == NF_INET_LOCAL_OUT &&
59ipt_local_hook(unsigned int hook, 29 (skb->len < sizeof(struct iphdr) ||
60 struct sk_buff *skb, 30 ip_hdrlen(skb) < sizeof(struct iphdr)))
61 const struct net_device *in, 31 /* root is playing with raw sockets. */
62 const struct net_device *out,
63 int (*okfn)(struct sk_buff *))
64{
65 /* root is playing with raw sockets. */
66 if (skb->len < sizeof(struct iphdr) ||
67 ip_hdrlen(skb) < sizeof(struct iphdr))
68 return NF_ACCEPT; 32 return NF_ACCEPT;
69 return ipt_do_table(skb, hook, in, out, 33
70 dev_net(out)->ipv4.iptable_raw); 34 net = dev_net((in != NULL) ? in : out);
35 return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_raw);
71} 36}
72 37
73/* 'raw' is the very first table. */ 38static struct nf_hook_ops *rawtable_ops __read_mostly;
74static struct nf_hook_ops ipt_ops[] __read_mostly = {
75 {
76 .hook = ipt_hook,
77 .pf = NFPROTO_IPV4,
78 .hooknum = NF_INET_PRE_ROUTING,
79 .priority = NF_IP_PRI_RAW,
80 .owner = THIS_MODULE,
81 },
82 {
83 .hook = ipt_local_hook,
84 .pf = NFPROTO_IPV4,
85 .hooknum = NF_INET_LOCAL_OUT,
86 .priority = NF_IP_PRI_RAW,
87 .owner = THIS_MODULE,
88 },
89};
90 39
91static int __net_init iptable_raw_net_init(struct net *net) 40static int __net_init iptable_raw_net_init(struct net *net)
92{ 41{
93 /* Register table */ 42 struct ipt_replace *repl;
43
44 repl = ipt_alloc_initial_table(&packet_raw);
45 if (repl == NULL)
46 return -ENOMEM;
94 net->ipv4.iptable_raw = 47 net->ipv4.iptable_raw =
95 ipt_register_table(net, &packet_raw, &initial_table.repl); 48 ipt_register_table(net, &packet_raw, repl);
49 kfree(repl);
96 if (IS_ERR(net->ipv4.iptable_raw)) 50 if (IS_ERR(net->ipv4.iptable_raw))
97 return PTR_ERR(net->ipv4.iptable_raw); 51 return PTR_ERR(net->ipv4.iptable_raw);
98 return 0; 52 return 0;
@@ -100,7 +54,7 @@ static int __net_init iptable_raw_net_init(struct net *net)
100 54
101static void __net_exit iptable_raw_net_exit(struct net *net) 55static void __net_exit iptable_raw_net_exit(struct net *net)
102{ 56{
103 ipt_unregister_table(net->ipv4.iptable_raw); 57 ipt_unregister_table(net, net->ipv4.iptable_raw);
104} 58}
105 59
106static struct pernet_operations iptable_raw_net_ops = { 60static struct pernet_operations iptable_raw_net_ops = {
@@ -117,9 +71,11 @@ static int __init iptable_raw_init(void)
117 return ret; 71 return ret;
118 72
119 /* Register hooks */ 73 /* Register hooks */
120 ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 74 rawtable_ops = xt_hook_link(&packet_raw, iptable_raw_hook);
121 if (ret < 0) 75 if (IS_ERR(rawtable_ops)) {
76 ret = PTR_ERR(rawtable_ops);
122 goto cleanup_table; 77 goto cleanup_table;
78 }
123 79
124 return ret; 80 return ret;
125 81
@@ -130,7 +86,7 @@ static int __init iptable_raw_init(void)
130 86
131static void __exit iptable_raw_fini(void) 87static void __exit iptable_raw_fini(void)
132{ 88{
133 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 89 xt_hook_unlink(&packet_raw, rawtable_ops);
134 unregister_pernet_subsys(&iptable_raw_net_ops); 90 unregister_pernet_subsys(&iptable_raw_net_ops);
135} 91}
136 92
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index 3bd3d6388da5..cce2f64e6f21 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -27,109 +27,44 @@ MODULE_DESCRIPTION("iptables security table, for MAC rules");
27 (1 << NF_INET_FORWARD) | \ 27 (1 << NF_INET_FORWARD) | \
28 (1 << NF_INET_LOCAL_OUT) 28 (1 << NF_INET_LOCAL_OUT)
29 29
30static const struct
31{
32 struct ipt_replace repl;
33 struct ipt_standard entries[3];
34 struct ipt_error term;
35} initial_table __net_initdata = {
36 .repl = {
37 .name = "security",
38 .valid_hooks = SECURITY_VALID_HOOKS,
39 .num_entries = 4,
40 .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
41 .hook_entry = {
42 [NF_INET_LOCAL_IN] = 0,
43 [NF_INET_FORWARD] = sizeof(struct ipt_standard),
44 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
45 },
46 .underflow = {
47 [NF_INET_LOCAL_IN] = 0,
48 [NF_INET_FORWARD] = sizeof(struct ipt_standard),
49 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
50 },
51 },
52 .entries = {
53 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
54 IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
55 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
56 },
57 .term = IPT_ERROR_INIT, /* ERROR */
58};
59
60static const struct xt_table security_table = { 30static const struct xt_table security_table = {
61 .name = "security", 31 .name = "security",
62 .valid_hooks = SECURITY_VALID_HOOKS, 32 .valid_hooks = SECURITY_VALID_HOOKS,
63 .me = THIS_MODULE, 33 .me = THIS_MODULE,
64 .af = NFPROTO_IPV4, 34 .af = NFPROTO_IPV4,
35 .priority = NF_IP_PRI_SECURITY,
65}; 36};
66 37
67static unsigned int 38static unsigned int
68ipt_local_in_hook(unsigned int hook, 39iptable_security_hook(unsigned int hook, struct sk_buff *skb,
69 struct sk_buff *skb, 40 const struct net_device *in,
70 const struct net_device *in, 41 const struct net_device *out,
71 const struct net_device *out, 42 int (*okfn)(struct sk_buff *))
72 int (*okfn)(struct sk_buff *))
73{
74 return ipt_do_table(skb, hook, in, out,
75 dev_net(in)->ipv4.iptable_security);
76}
77
78static unsigned int
79ipt_forward_hook(unsigned int hook,
80 struct sk_buff *skb,
81 const struct net_device *in,
82 const struct net_device *out,
83 int (*okfn)(struct sk_buff *))
84{ 43{
85 return ipt_do_table(skb, hook, in, out, 44 const struct net *net;
86 dev_net(in)->ipv4.iptable_security);
87}
88 45
89static unsigned int 46 if (hook == NF_INET_LOCAL_OUT &&
90ipt_local_out_hook(unsigned int hook, 47 (skb->len < sizeof(struct iphdr) ||
91 struct sk_buff *skb, 48 ip_hdrlen(skb) < sizeof(struct iphdr)))
92 const struct net_device *in, 49 /* Somebody is playing with raw sockets. */
93 const struct net_device *out,
94 int (*okfn)(struct sk_buff *))
95{
96 /* Somebody is playing with raw sockets. */
97 if (skb->len < sizeof(struct iphdr) ||
98 ip_hdrlen(skb) < sizeof(struct iphdr))
99 return NF_ACCEPT; 50 return NF_ACCEPT;
100 return ipt_do_table(skb, hook, in, out, 51
101 dev_net(out)->ipv4.iptable_security); 52 net = dev_net((in != NULL) ? in : out);
53 return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_security);
102} 54}
103 55
104static struct nf_hook_ops ipt_ops[] __read_mostly = { 56static struct nf_hook_ops *sectbl_ops __read_mostly;
105 {
106 .hook = ipt_local_in_hook,
107 .owner = THIS_MODULE,
108 .pf = NFPROTO_IPV4,
109 .hooknum = NF_INET_LOCAL_IN,
110 .priority = NF_IP_PRI_SECURITY,
111 },
112 {
113 .hook = ipt_forward_hook,
114 .owner = THIS_MODULE,
115 .pf = NFPROTO_IPV4,
116 .hooknum = NF_INET_FORWARD,
117 .priority = NF_IP_PRI_SECURITY,
118 },
119 {
120 .hook = ipt_local_out_hook,
121 .owner = THIS_MODULE,
122 .pf = NFPROTO_IPV4,
123 .hooknum = NF_INET_LOCAL_OUT,
124 .priority = NF_IP_PRI_SECURITY,
125 },
126};
127 57
128static int __net_init iptable_security_net_init(struct net *net) 58static int __net_init iptable_security_net_init(struct net *net)
129{ 59{
130 net->ipv4.iptable_security = 60 struct ipt_replace *repl;
131 ipt_register_table(net, &security_table, &initial_table.repl);
132 61
62 repl = ipt_alloc_initial_table(&security_table);
63 if (repl == NULL)
64 return -ENOMEM;
65 net->ipv4.iptable_security =
66 ipt_register_table(net, &security_table, repl);
67 kfree(repl);
133 if (IS_ERR(net->ipv4.iptable_security)) 68 if (IS_ERR(net->ipv4.iptable_security))
134 return PTR_ERR(net->ipv4.iptable_security); 69 return PTR_ERR(net->ipv4.iptable_security);
135 70
@@ -138,7 +73,7 @@ static int __net_init iptable_security_net_init(struct net *net)
138 73
139static void __net_exit iptable_security_net_exit(struct net *net) 74static void __net_exit iptable_security_net_exit(struct net *net)
140{ 75{
141 ipt_unregister_table(net->ipv4.iptable_security); 76 ipt_unregister_table(net, net->ipv4.iptable_security);
142} 77}
143 78
144static struct pernet_operations iptable_security_net_ops = { 79static struct pernet_operations iptable_security_net_ops = {
@@ -154,9 +89,11 @@ static int __init iptable_security_init(void)
154 if (ret < 0) 89 if (ret < 0)
155 return ret; 90 return ret;
156 91
157 ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 92 sectbl_ops = xt_hook_link(&security_table, iptable_security_hook);
158 if (ret < 0) 93 if (IS_ERR(sectbl_ops)) {
94 ret = PTR_ERR(sectbl_ops);
159 goto cleanup_table; 95 goto cleanup_table;
96 }
160 97
161 return ret; 98 return ret;
162 99
@@ -167,7 +104,7 @@ cleanup_table:
167 104
168static void __exit iptable_security_fini(void) 105static void __exit iptable_security_fini(void)
169{ 106{
170 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 107 xt_hook_unlink(&security_table, sectbl_ops);
171 unregister_pernet_subsys(&iptable_security_net_ops); 108 unregister_pernet_subsys(&iptable_security_net_ops);
172} 109}
173 110
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index d171b123a656..2bb1f87051c4 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -22,6 +22,7 @@
22#include <net/netfilter/nf_conntrack_helper.h> 22#include <net/netfilter/nf_conntrack_helper.h>
23#include <net/netfilter/nf_conntrack_l4proto.h> 23#include <net/netfilter/nf_conntrack_l4proto.h>
24#include <net/netfilter/nf_conntrack_l3proto.h> 24#include <net/netfilter/nf_conntrack_l3proto.h>
25#include <net/netfilter/nf_conntrack_zones.h>
25#include <net/netfilter/nf_conntrack_core.h> 26#include <net/netfilter/nf_conntrack_core.h>
26#include <net/netfilter/ipv4/nf_conntrack_ipv4.h> 27#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
27#include <net/netfilter/nf_nat_helper.h> 28#include <net/netfilter/nf_nat_helper.h>
@@ -210,7 +211,7 @@ static ctl_table ip_ct_sysctl_table[] = {
210 }, 211 },
211 { 212 {
212 .procname = "ip_conntrack_buckets", 213 .procname = "ip_conntrack_buckets",
213 .data = &nf_conntrack_htable_size, 214 .data = &init_net.ct.htable_size,
214 .maxlen = sizeof(unsigned int), 215 .maxlen = sizeof(unsigned int),
215 .mode = 0444, 216 .mode = 0444,
216 .proc_handler = proc_dointvec, 217 .proc_handler = proc_dointvec,
@@ -266,7 +267,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
266 return -EINVAL; 267 return -EINVAL;
267 } 268 }
268 269
269 h = nf_conntrack_find_get(sock_net(sk), &tuple); 270 h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple);
270 if (h) { 271 if (h) {
271 struct sockaddr_in sin; 272 struct sockaddr_in sin;
272 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); 273 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 8668a3defda6..2fb7b76da94f 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -32,7 +32,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
32 struct hlist_nulls_node *n; 32 struct hlist_nulls_node *n;
33 33
34 for (st->bucket = 0; 34 for (st->bucket = 0;
35 st->bucket < nf_conntrack_htable_size; 35 st->bucket < net->ct.htable_size;
36 st->bucket++) { 36 st->bucket++) {
37 n = rcu_dereference(net->ct.hash[st->bucket].first); 37 n = rcu_dereference(net->ct.hash[st->bucket].first);
38 if (!is_a_nulls(n)) 38 if (!is_a_nulls(n))
@@ -50,7 +50,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
50 head = rcu_dereference(head->next); 50 head = rcu_dereference(head->next);
51 while (is_a_nulls(head)) { 51 while (is_a_nulls(head)) {
52 if (likely(get_nulls_value(head) == st->bucket)) { 52 if (likely(get_nulls_value(head) == st->bucket)) {
53 if (++st->bucket >= nf_conntrack_htable_size) 53 if (++st->bucket >= net->ct.htable_size)
54 return NULL; 54 return NULL;
55 } 55 }
56 head = rcu_dereference(net->ct.hash[st->bucket].first); 56 head = rcu_dereference(net->ct.hash[st->bucket].first);
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 7afd39b5b781..7404bde95994 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -18,6 +18,7 @@
18#include <net/netfilter/nf_conntrack_tuple.h> 18#include <net/netfilter/nf_conntrack_tuple.h>
19#include <net/netfilter/nf_conntrack_l4proto.h> 19#include <net/netfilter/nf_conntrack_l4proto.h>
20#include <net/netfilter/nf_conntrack_core.h> 20#include <net/netfilter/nf_conntrack_core.h>
21#include <net/netfilter/nf_conntrack_zones.h>
21#include <net/netfilter/nf_log.h> 22#include <net/netfilter/nf_log.h>
22 23
23static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ; 24static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ;
@@ -114,13 +115,14 @@ static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb,
114 115
115/* Returns conntrack if it dealt with ICMP, and filled in skb fields */ 116/* Returns conntrack if it dealt with ICMP, and filled in skb fields */
116static int 117static int
117icmp_error_message(struct net *net, struct sk_buff *skb, 118icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
118 enum ip_conntrack_info *ctinfo, 119 enum ip_conntrack_info *ctinfo,
119 unsigned int hooknum) 120 unsigned int hooknum)
120{ 121{
121 struct nf_conntrack_tuple innertuple, origtuple; 122 struct nf_conntrack_tuple innertuple, origtuple;
122 const struct nf_conntrack_l4proto *innerproto; 123 const struct nf_conntrack_l4proto *innerproto;
123 const struct nf_conntrack_tuple_hash *h; 124 const struct nf_conntrack_tuple_hash *h;
125 u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
124 126
125 NF_CT_ASSERT(skb->nfct == NULL); 127 NF_CT_ASSERT(skb->nfct == NULL);
126 128
@@ -146,7 +148,7 @@ icmp_error_message(struct net *net, struct sk_buff *skb,
146 148
147 *ctinfo = IP_CT_RELATED; 149 *ctinfo = IP_CT_RELATED;
148 150
149 h = nf_conntrack_find_get(net, &innertuple); 151 h = nf_conntrack_find_get(net, zone, &innertuple);
150 if (!h) { 152 if (!h) {
151 pr_debug("icmp_error_message: no match\n"); 153 pr_debug("icmp_error_message: no match\n");
152 return -NF_ACCEPT; 154 return -NF_ACCEPT;
@@ -163,7 +165,8 @@ icmp_error_message(struct net *net, struct sk_buff *skb,
163 165
164/* Small and modified version of icmp_rcv */ 166/* Small and modified version of icmp_rcv */
165static int 167static int
166icmp_error(struct net *net, struct sk_buff *skb, unsigned int dataoff, 168icmp_error(struct net *net, struct nf_conn *tmpl,
169 struct sk_buff *skb, unsigned int dataoff,
167 enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) 170 enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum)
168{ 171{
169 const struct icmphdr *icmph; 172 const struct icmphdr *icmph;
@@ -208,7 +211,7 @@ icmp_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
208 icmph->type != ICMP_REDIRECT) 211 icmph->type != ICMP_REDIRECT)
209 return NF_ACCEPT; 212 return NF_ACCEPT;
210 213
211 return icmp_error_message(net, skb, ctinfo, hooknum); 214 return icmp_error_message(net, tmpl, skb, ctinfo, hooknum);
212} 215}
213 216
214#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 217#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index fa2d6b6fc3e5..cb763ae9ed90 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -14,8 +14,13 @@
14#include <net/route.h> 14#include <net/route.h>
15#include <net/ip.h> 15#include <net/ip.h>
16 16
17#include <linux/netfilter_bridge.h>
17#include <linux/netfilter_ipv4.h> 18#include <linux/netfilter_ipv4.h>
18#include <net/netfilter/ipv4/nf_defrag_ipv4.h> 19#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
20#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
21#include <net/netfilter/nf_conntrack.h>
22#endif
23#include <net/netfilter/nf_conntrack_zones.h>
19 24
20/* Returns new sk_buff, or NULL */ 25/* Returns new sk_buff, or NULL */
21static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) 26static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
@@ -34,6 +39,27 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
34 return err; 39 return err;
35} 40}
36 41
42static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
43 struct sk_buff *skb)
44{
45 u16 zone = NF_CT_DEFAULT_ZONE;
46
47#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
48 if (skb->nfct)
49 zone = nf_ct_zone((struct nf_conn *)skb->nfct);
50#endif
51
52#ifdef CONFIG_BRIDGE_NETFILTER
53 if (skb->nf_bridge &&
54 skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
55 return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
56#endif
57 if (hooknum == NF_INET_PRE_ROUTING)
58 return IP_DEFRAG_CONNTRACK_IN + zone;
59 else
60 return IP_DEFRAG_CONNTRACK_OUT + zone;
61}
62
37static unsigned int ipv4_conntrack_defrag(unsigned int hooknum, 63static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
38 struct sk_buff *skb, 64 struct sk_buff *skb,
39 const struct net_device *in, 65 const struct net_device *in,
@@ -44,16 +70,14 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
44#if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE) 70#if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE)
45 /* Previously seen (loopback)? Ignore. Do this before 71 /* Previously seen (loopback)? Ignore. Do this before
46 fragment check. */ 72 fragment check. */
47 if (skb->nfct) 73 if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
48 return NF_ACCEPT; 74 return NF_ACCEPT;
49#endif 75#endif
50#endif 76#endif
51 /* Gather fragments. */ 77 /* Gather fragments. */
52 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { 78 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
53 if (nf_ct_ipv4_gather_frags(skb, 79 enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
54 hooknum == NF_INET_PRE_ROUTING ? 80 if (nf_ct_ipv4_gather_frags(skb, user))
55 IP_DEFRAG_CONNTRACK_IN :
56 IP_DEFRAG_CONNTRACK_OUT))
57 return NF_STOLEN; 81 return NF_STOLEN;
58 } 82 }
59 return NF_ACCEPT; 83 return NF_ACCEPT;
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index fe1a64479dd0..4595281c2863 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -30,14 +30,12 @@
30#include <net/netfilter/nf_conntrack_helper.h> 30#include <net/netfilter/nf_conntrack_helper.h>
31#include <net/netfilter/nf_conntrack_l3proto.h> 31#include <net/netfilter/nf_conntrack_l3proto.h>
32#include <net/netfilter/nf_conntrack_l4proto.h> 32#include <net/netfilter/nf_conntrack_l4proto.h>
33#include <net/netfilter/nf_conntrack_zones.h>
33 34
34static DEFINE_SPINLOCK(nf_nat_lock); 35static DEFINE_SPINLOCK(nf_nat_lock);
35 36
36static struct nf_conntrack_l3proto *l3proto __read_mostly; 37static struct nf_conntrack_l3proto *l3proto __read_mostly;
37 38
38/* Calculated at init based on memory size */
39static unsigned int nf_nat_htable_size __read_mostly;
40
41#define MAX_IP_NAT_PROTO 256 39#define MAX_IP_NAT_PROTO 256
42static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO] 40static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
43 __read_mostly; 41 __read_mostly;
@@ -72,15 +70,16 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put);
72 70
73/* We keep an extra hash for each conntrack, for fast searching. */ 71/* We keep an extra hash for each conntrack, for fast searching. */
74static inline unsigned int 72static inline unsigned int
75hash_by_src(const struct nf_conntrack_tuple *tuple) 73hash_by_src(const struct net *net, u16 zone,
74 const struct nf_conntrack_tuple *tuple)
76{ 75{
77 unsigned int hash; 76 unsigned int hash;
78 77
79 /* Original src, to ensure we map it consistently if poss. */ 78 /* Original src, to ensure we map it consistently if poss. */
80 hash = jhash_3words((__force u32)tuple->src.u3.ip, 79 hash = jhash_3words((__force u32)tuple->src.u3.ip,
81 (__force u32)tuple->src.u.all, 80 (__force u32)tuple->src.u.all ^ zone,
82 tuple->dst.protonum, 0); 81 tuple->dst.protonum, 0);
83 return ((u64)hash * nf_nat_htable_size) >> 32; 82 return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
84} 83}
85 84
86/* Is this tuple already taken? (not by us) */ 85/* Is this tuple already taken? (not by us) */
@@ -142,12 +141,12 @@ same_src(const struct nf_conn *ct,
142 141
143/* Only called for SRC manip */ 142/* Only called for SRC manip */
144static int 143static int
145find_appropriate_src(struct net *net, 144find_appropriate_src(struct net *net, u16 zone,
146 const struct nf_conntrack_tuple *tuple, 145 const struct nf_conntrack_tuple *tuple,
147 struct nf_conntrack_tuple *result, 146 struct nf_conntrack_tuple *result,
148 const struct nf_nat_range *range) 147 const struct nf_nat_range *range)
149{ 148{
150 unsigned int h = hash_by_src(tuple); 149 unsigned int h = hash_by_src(net, zone, tuple);
151 const struct nf_conn_nat *nat; 150 const struct nf_conn_nat *nat;
152 const struct nf_conn *ct; 151 const struct nf_conn *ct;
153 const struct hlist_node *n; 152 const struct hlist_node *n;
@@ -155,7 +154,7 @@ find_appropriate_src(struct net *net,
155 rcu_read_lock(); 154 rcu_read_lock();
156 hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) { 155 hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) {
157 ct = nat->ct; 156 ct = nat->ct;
158 if (same_src(ct, tuple)) { 157 if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
159 /* Copy source part from reply tuple. */ 158 /* Copy source part from reply tuple. */
160 nf_ct_invert_tuplepr(result, 159 nf_ct_invert_tuplepr(result,
161 &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 160 &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
@@ -178,7 +177,7 @@ find_appropriate_src(struct net *net,
178 the ip with the lowest src-ip/dst-ip/proto usage. 177 the ip with the lowest src-ip/dst-ip/proto usage.
179*/ 178*/
180static void 179static void
181find_best_ips_proto(struct nf_conntrack_tuple *tuple, 180find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
182 const struct nf_nat_range *range, 181 const struct nf_nat_range *range,
183 const struct nf_conn *ct, 182 const struct nf_conn *ct,
184 enum nf_nat_manip_type maniptype) 183 enum nf_nat_manip_type maniptype)
@@ -212,7 +211,7 @@ find_best_ips_proto(struct nf_conntrack_tuple *tuple,
212 maxip = ntohl(range->max_ip); 211 maxip = ntohl(range->max_ip);
213 j = jhash_2words((__force u32)tuple->src.u3.ip, 212 j = jhash_2words((__force u32)tuple->src.u3.ip,
214 range->flags & IP_NAT_RANGE_PERSISTENT ? 213 range->flags & IP_NAT_RANGE_PERSISTENT ?
215 0 : (__force u32)tuple->dst.u3.ip, 0); 214 0 : (__force u32)tuple->dst.u3.ip ^ zone, 0);
216 j = ((u64)j * (maxip - minip + 1)) >> 32; 215 j = ((u64)j * (maxip - minip + 1)) >> 32;
217 *var_ipp = htonl(minip + j); 216 *var_ipp = htonl(minip + j);
218} 217}
@@ -232,6 +231,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
232{ 231{
233 struct net *net = nf_ct_net(ct); 232 struct net *net = nf_ct_net(ct);
234 const struct nf_nat_protocol *proto; 233 const struct nf_nat_protocol *proto;
234 u16 zone = nf_ct_zone(ct);
235 235
236 /* 1) If this srcip/proto/src-proto-part is currently mapped, 236 /* 1) If this srcip/proto/src-proto-part is currently mapped,
237 and that same mapping gives a unique tuple within the given 237 and that same mapping gives a unique tuple within the given
@@ -242,7 +242,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
242 manips not an issue. */ 242 manips not an issue. */
243 if (maniptype == IP_NAT_MANIP_SRC && 243 if (maniptype == IP_NAT_MANIP_SRC &&
244 !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) { 244 !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
245 if (find_appropriate_src(net, orig_tuple, tuple, range)) { 245 if (find_appropriate_src(net, zone, orig_tuple, tuple, range)) {
246 pr_debug("get_unique_tuple: Found current src map\n"); 246 pr_debug("get_unique_tuple: Found current src map\n");
247 if (!nf_nat_used_tuple(tuple, ct)) 247 if (!nf_nat_used_tuple(tuple, ct))
248 return; 248 return;
@@ -252,7 +252,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
252 /* 2) Select the least-used IP/proto combination in the given 252 /* 2) Select the least-used IP/proto combination in the given
253 range. */ 253 range. */
254 *tuple = *orig_tuple; 254 *tuple = *orig_tuple;
255 find_best_ips_proto(tuple, range, ct, maniptype); 255 find_best_ips_proto(zone, tuple, range, ct, maniptype);
256 256
257 /* 3) The per-protocol part of the manip is made to map into 257 /* 3) The per-protocol part of the manip is made to map into
258 the range to make a unique tuple. */ 258 the range to make a unique tuple. */
@@ -330,7 +330,8 @@ nf_nat_setup_info(struct nf_conn *ct,
330 if (have_to_hash) { 330 if (have_to_hash) {
331 unsigned int srchash; 331 unsigned int srchash;
332 332
333 srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 333 srchash = hash_by_src(net, nf_ct_zone(ct),
334 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
334 spin_lock_bh(&nf_nat_lock); 335 spin_lock_bh(&nf_nat_lock);
335 /* nf_conntrack_alter_reply might re-allocate exntension aera */ 336 /* nf_conntrack_alter_reply might re-allocate exntension aera */
336 nat = nfct_nat(ct); 337 nat = nfct_nat(ct);
@@ -679,8 +680,10 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
679 680
680static int __net_init nf_nat_net_init(struct net *net) 681static int __net_init nf_nat_net_init(struct net *net)
681{ 682{
682 net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 683 /* Leave them the same for the moment. */
683 &net->ipv4.nat_vmalloced, 0); 684 net->ipv4.nat_htable_size = net->ct.htable_size;
685 net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
686 &net->ipv4.nat_vmalloced, 0);
684 if (!net->ipv4.nat_bysource) 687 if (!net->ipv4.nat_bysource)
685 return -ENOMEM; 688 return -ENOMEM;
686 return 0; 689 return 0;
@@ -703,7 +706,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
703 nf_ct_iterate_cleanup(net, &clean_nat, NULL); 706 nf_ct_iterate_cleanup(net, &clean_nat, NULL);
704 synchronize_rcu(); 707 synchronize_rcu();
705 nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced, 708 nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
706 nf_nat_htable_size); 709 net->ipv4.nat_htable_size);
707} 710}
708 711
709static struct pernet_operations nf_nat_net_ops = { 712static struct pernet_operations nf_nat_net_ops = {
@@ -724,9 +727,6 @@ static int __init nf_nat_init(void)
724 return ret; 727 return ret;
725 } 728 }
726 729
727 /* Leave them the same for the moment. */
728 nf_nat_htable_size = nf_conntrack_htable_size;
729
730 ret = register_pernet_subsys(&nf_nat_net_ops); 730 ret = register_pernet_subsys(&nf_nat_net_ops);
731 if (ret < 0) 731 if (ret < 0)
732 goto cleanup_extend; 732 goto cleanup_extend;
diff --git a/net/ipv4/netfilter/nf_nat_ftp.c b/net/ipv4/netfilter/nf_nat_ftp.c
index a1d5d58a58bf..86e0e84ff0a0 100644
--- a/net/ipv4/netfilter/nf_nat_ftp.c
+++ b/net/ipv4/netfilter/nf_nat_ftp.c
@@ -27,76 +27,29 @@ MODULE_ALIAS("ip_nat_ftp");
27 27
28/* FIXME: Time out? --RR */ 28/* FIXME: Time out? --RR */
29 29
30static int 30static int nf_nat_ftp_fmt_cmd(enum nf_ct_ftp_type type,
31mangle_rfc959_packet(struct sk_buff *skb, 31 char *buffer, size_t buflen,
32 __be32 newip, 32 __be32 addr, u16 port)
33 u_int16_t port,
34 unsigned int matchoff,
35 unsigned int matchlen,
36 struct nf_conn *ct,
37 enum ip_conntrack_info ctinfo)
38{ 33{
39 char buffer[sizeof("nnn,nnn,nnn,nnn,nnn,nnn")]; 34 switch (type) {
40 35 case NF_CT_FTP_PORT:
41 sprintf(buffer, "%u,%u,%u,%u,%u,%u", 36 case NF_CT_FTP_PASV:
42 NIPQUAD(newip), port>>8, port&0xFF); 37 return snprintf(buffer, buflen, "%u,%u,%u,%u,%u,%u",
43 38 ((unsigned char *)&addr)[0],
44 pr_debug("calling nf_nat_mangle_tcp_packet\n"); 39 ((unsigned char *)&addr)[1],
45 40 ((unsigned char *)&addr)[2],
46 return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff, 41 ((unsigned char *)&addr)[3],
47 matchlen, buffer, strlen(buffer)); 42 port >> 8,
48} 43 port & 0xFF);
49 44 case NF_CT_FTP_EPRT:
50/* |1|132.235.1.2|6275| */ 45 return snprintf(buffer, buflen, "|1|%pI4|%u|", &addr, port);
51static int 46 case NF_CT_FTP_EPSV:
52mangle_eprt_packet(struct sk_buff *skb, 47 return snprintf(buffer, buflen, "|||%u|", port);
53 __be32 newip, 48 }
54 u_int16_t port,
55 unsigned int matchoff,
56 unsigned int matchlen,
57 struct nf_conn *ct,
58 enum ip_conntrack_info ctinfo)
59{
60 char buffer[sizeof("|1|255.255.255.255|65535|")];
61
62 sprintf(buffer, "|1|%u.%u.%u.%u|%u|", NIPQUAD(newip), port);
63
64 pr_debug("calling nf_nat_mangle_tcp_packet\n");
65
66 return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff,
67 matchlen, buffer, strlen(buffer));
68}
69
70/* |1|132.235.1.2|6275| */
71static int
72mangle_epsv_packet(struct sk_buff *skb,
73 __be32 newip,
74 u_int16_t port,
75 unsigned int matchoff,
76 unsigned int matchlen,
77 struct nf_conn *ct,
78 enum ip_conntrack_info ctinfo)
79{
80 char buffer[sizeof("|||65535|")];
81
82 sprintf(buffer, "|||%u|", port);
83
84 pr_debug("calling nf_nat_mangle_tcp_packet\n");
85 49
86 return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff, 50 return 0;
87 matchlen, buffer, strlen(buffer));
88} 51}
89 52
90static int (*mangle[])(struct sk_buff *, __be32, u_int16_t,
91 unsigned int, unsigned int, struct nf_conn *,
92 enum ip_conntrack_info)
93= {
94 [NF_CT_FTP_PORT] = mangle_rfc959_packet,
95 [NF_CT_FTP_PASV] = mangle_rfc959_packet,
96 [NF_CT_FTP_EPRT] = mangle_eprt_packet,
97 [NF_CT_FTP_EPSV] = mangle_epsv_packet
98};
99
100/* So, this packet has hit the connection tracking matching code. 53/* So, this packet has hit the connection tracking matching code.
101 Mangle it, and change the expectation to match the new version. */ 54 Mangle it, and change the expectation to match the new version. */
102static unsigned int nf_nat_ftp(struct sk_buff *skb, 55static unsigned int nf_nat_ftp(struct sk_buff *skb,
@@ -110,6 +63,8 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb,
110 u_int16_t port; 63 u_int16_t port;
111 int dir = CTINFO2DIR(ctinfo); 64 int dir = CTINFO2DIR(ctinfo);
112 struct nf_conn *ct = exp->master; 65 struct nf_conn *ct = exp->master;
66 char buffer[sizeof("|1|255.255.255.255|65535|")];
67 unsigned int buflen;
113 68
114 pr_debug("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen); 69 pr_debug("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen);
115 70
@@ -132,11 +87,21 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb,
132 if (port == 0) 87 if (port == 0)
133 return NF_DROP; 88 return NF_DROP;
134 89
135 if (!mangle[type](skb, newip, port, matchoff, matchlen, ct, ctinfo)) { 90 buflen = nf_nat_ftp_fmt_cmd(type, buffer, sizeof(buffer), newip, port);
136 nf_ct_unexpect_related(exp); 91 if (!buflen)
137 return NF_DROP; 92 goto out;
138 } 93
94 pr_debug("calling nf_nat_mangle_tcp_packet\n");
95
96 if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff,
97 matchlen, buffer, buflen))
98 goto out;
99
139 return NF_ACCEPT; 100 return NF_ACCEPT;
101
102out:
103 nf_ct_unexpect_related(exp);
104 return NF_DROP;
140} 105}
141 106
142static void __exit nf_nat_ftp_fini(void) 107static void __exit nf_nat_ftp_fini(void)
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 7f10a6be0191..4b6af4bb1f50 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -141,6 +141,17 @@ static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
141 return 1; 141 return 1;
142} 142}
143 143
144void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
145 __be32 seq, s16 off)
146{
147 if (!off)
148 return;
149 set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
150 adjust_tcp_sequence(ntohl(seq), off, ct, ctinfo);
151 nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
152}
153EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
154
144/* Generic function for mangling variable-length address changes inside 155/* Generic function for mangling variable-length address changes inside
145 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX 156 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
146 * command in FTP). 157 * command in FTP).
@@ -149,14 +160,13 @@ static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
149 * skb enlargement, ... 160 * skb enlargement, ...
150 * 161 *
151 * */ 162 * */
152int 163int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
153nf_nat_mangle_tcp_packet(struct sk_buff *skb, 164 struct nf_conn *ct,
154 struct nf_conn *ct, 165 enum ip_conntrack_info ctinfo,
155 enum ip_conntrack_info ctinfo, 166 unsigned int match_offset,
156 unsigned int match_offset, 167 unsigned int match_len,
157 unsigned int match_len, 168 const char *rep_buffer,
158 const char *rep_buffer, 169 unsigned int rep_len, bool adjust)
159 unsigned int rep_len)
160{ 170{
161 struct rtable *rt = skb_rtable(skb); 171 struct rtable *rt = skb_rtable(skb);
162 struct iphdr *iph; 172 struct iphdr *iph;
@@ -202,16 +212,13 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb,
202 inet_proto_csum_replace2(&tcph->check, skb, 212 inet_proto_csum_replace2(&tcph->check, skb,
203 htons(oldlen), htons(datalen), 1); 213 htons(oldlen), htons(datalen), 1);
204 214
205 if (rep_len != match_len) { 215 if (adjust && rep_len != match_len)
206 set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); 216 nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq,
207 adjust_tcp_sequence(ntohl(tcph->seq), 217 (int)rep_len - (int)match_len);
208 (int)rep_len - (int)match_len, 218
209 ct, ctinfo);
210 nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
211 }
212 return 1; 219 return 1;
213} 220}
214EXPORT_SYMBOL(nf_nat_mangle_tcp_packet); 221EXPORT_SYMBOL(__nf_nat_mangle_tcp_packet);
215 222
216/* Generic function for mangling variable-length address changes inside 223/* Generic function for mangling variable-length address changes inside
217 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX 224 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 9eb171056c63..4c060038d29f 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -25,6 +25,7 @@
25#include <net/netfilter/nf_nat_rule.h> 25#include <net/netfilter/nf_nat_rule.h>
26#include <net/netfilter/nf_conntrack_helper.h> 26#include <net/netfilter/nf_conntrack_helper.h>
27#include <net/netfilter/nf_conntrack_expect.h> 27#include <net/netfilter/nf_conntrack_expect.h>
28#include <net/netfilter/nf_conntrack_zones.h>
28#include <linux/netfilter/nf_conntrack_proto_gre.h> 29#include <linux/netfilter/nf_conntrack_proto_gre.h>
29#include <linux/netfilter/nf_conntrack_pptp.h> 30#include <linux/netfilter/nf_conntrack_pptp.h>
30 31
@@ -74,7 +75,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
74 75
75 pr_debug("trying to unexpect other dir: "); 76 pr_debug("trying to unexpect other dir: ");
76 nf_ct_dump_tuple_ip(&t); 77 nf_ct_dump_tuple_ip(&t);
77 other_exp = nf_ct_expect_find_get(net, &t); 78 other_exp = nf_ct_expect_find_get(net, nf_ct_zone(ct), &t);
78 if (other_exp) { 79 if (other_exp) {
79 nf_ct_unexpect_related(other_exp); 80 nf_ct_unexpect_related(other_exp);
80 nf_ct_expect_put(other_exp); 81 nf_ct_expect_put(other_exp);
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 9e81e0dfb4ec..ab74cc0535e2 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -28,36 +28,6 @@
28 (1 << NF_INET_POST_ROUTING) | \ 28 (1 << NF_INET_POST_ROUTING) | \
29 (1 << NF_INET_LOCAL_OUT)) 29 (1 << NF_INET_LOCAL_OUT))
30 30
31static const struct
32{
33 struct ipt_replace repl;
34 struct ipt_standard entries[3];
35 struct ipt_error term;
36} nat_initial_table __net_initdata = {
37 .repl = {
38 .name = "nat",
39 .valid_hooks = NAT_VALID_HOOKS,
40 .num_entries = 4,
41 .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
42 .hook_entry = {
43 [NF_INET_PRE_ROUTING] = 0,
44 [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard),
45 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2
46 },
47 .underflow = {
48 [NF_INET_PRE_ROUTING] = 0,
49 [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard),
50 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2
51 },
52 },
53 .entries = {
54 IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
55 IPT_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
56 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
57 },
58 .term = IPT_ERROR_INIT, /* ERROR */
59};
60
61static const struct xt_table nat_table = { 31static const struct xt_table nat_table = {
62 .name = "nat", 32 .name = "nat",
63 .valid_hooks = NAT_VALID_HOOKS, 33 .valid_hooks = NAT_VALID_HOOKS,
@@ -186,8 +156,13 @@ static struct xt_target ipt_dnat_reg __read_mostly = {
186 156
187static int __net_init nf_nat_rule_net_init(struct net *net) 157static int __net_init nf_nat_rule_net_init(struct net *net)
188{ 158{
189 net->ipv4.nat_table = ipt_register_table(net, &nat_table, 159 struct ipt_replace *repl;
190 &nat_initial_table.repl); 160
161 repl = ipt_alloc_initial_table(&nat_table);
162 if (repl == NULL)
163 return -ENOMEM;
164 net->ipv4.nat_table = ipt_register_table(net, &nat_table, repl);
165 kfree(repl);
191 if (IS_ERR(net->ipv4.nat_table)) 166 if (IS_ERR(net->ipv4.nat_table))
192 return PTR_ERR(net->ipv4.nat_table); 167 return PTR_ERR(net->ipv4.nat_table);
193 return 0; 168 return 0;
@@ -195,7 +170,7 @@ static int __net_init nf_nat_rule_net_init(struct net *net)
195 170
196static void __net_exit nf_nat_rule_net_exit(struct net *net) 171static void __net_exit nf_nat_rule_net_exit(struct net *net)
197{ 172{
198 ipt_unregister_table(net->ipv4.nat_table); 173 ipt_unregister_table(net, net->ipv4.nat_table);
199} 174}
200 175
201static struct pernet_operations nf_nat_rule_net_ops = { 176static struct pernet_operations nf_nat_rule_net_ops = {
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index 07d61a57613c..11b538deaaec 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -1,4 +1,4 @@
1/* SIP extension for UDP NAT alteration. 1/* SIP extension for NAT alteration.
2 * 2 *
3 * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar> 3 * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar>
4 * based on RR's ip_nat_ftp.c and other modules. 4 * based on RR's ip_nat_ftp.c and other modules.
@@ -15,6 +15,7 @@
15#include <linux/ip.h> 15#include <linux/ip.h>
16#include <net/ip.h> 16#include <net/ip.h>
17#include <linux/udp.h> 17#include <linux/udp.h>
18#include <linux/tcp.h>
18 19
19#include <net/netfilter/nf_nat.h> 20#include <net/netfilter/nf_nat.h>
20#include <net/netfilter/nf_nat_helper.h> 21#include <net/netfilter/nf_nat_helper.h>
@@ -29,25 +30,42 @@ MODULE_DESCRIPTION("SIP NAT helper");
29MODULE_ALIAS("ip_nat_sip"); 30MODULE_ALIAS("ip_nat_sip");
30 31
31 32
32static unsigned int mangle_packet(struct sk_buff *skb, 33static unsigned int mangle_packet(struct sk_buff *skb, unsigned int dataoff,
33 const char **dptr, unsigned int *datalen, 34 const char **dptr, unsigned int *datalen,
34 unsigned int matchoff, unsigned int matchlen, 35 unsigned int matchoff, unsigned int matchlen,
35 const char *buffer, unsigned int buflen) 36 const char *buffer, unsigned int buflen)
36{ 37{
37 enum ip_conntrack_info ctinfo; 38 enum ip_conntrack_info ctinfo;
38 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 39 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
39 40 struct tcphdr *th;
40 if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, matchoff, matchlen, 41 unsigned int baseoff;
41 buffer, buflen)) 42
42 return 0; 43 if (nf_ct_protonum(ct) == IPPROTO_TCP) {
44 th = (struct tcphdr *)(skb->data + ip_hdrlen(skb));
45 baseoff = ip_hdrlen(skb) + th->doff * 4;
46 matchoff += dataoff - baseoff;
47
48 if (!__nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
49 matchoff, matchlen,
50 buffer, buflen, false))
51 return 0;
52 } else {
53 baseoff = ip_hdrlen(skb) + sizeof(struct udphdr);
54 matchoff += dataoff - baseoff;
55
56 if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
57 matchoff, matchlen,
58 buffer, buflen))
59 return 0;
60 }
43 61
44 /* Reload data pointer and adjust datalen value */ 62 /* Reload data pointer and adjust datalen value */
45 *dptr = skb->data + ip_hdrlen(skb) + sizeof(struct udphdr); 63 *dptr = skb->data + dataoff;
46 *datalen += buflen - matchlen; 64 *datalen += buflen - matchlen;
47 return 1; 65 return 1;
48} 66}
49 67
50static int map_addr(struct sk_buff *skb, 68static int map_addr(struct sk_buff *skb, unsigned int dataoff,
51 const char **dptr, unsigned int *datalen, 69 const char **dptr, unsigned int *datalen,
52 unsigned int matchoff, unsigned int matchlen, 70 unsigned int matchoff, unsigned int matchlen,
53 union nf_inet_addr *addr, __be16 port) 71 union nf_inet_addr *addr, __be16 port)
@@ -76,11 +94,11 @@ static int map_addr(struct sk_buff *skb,
76 94
77 buflen = sprintf(buffer, "%pI4:%u", &newaddr, ntohs(newport)); 95 buflen = sprintf(buffer, "%pI4:%u", &newaddr, ntohs(newport));
78 96
79 return mangle_packet(skb, dptr, datalen, matchoff, matchlen, 97 return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
80 buffer, buflen); 98 buffer, buflen);
81} 99}
82 100
83static int map_sip_addr(struct sk_buff *skb, 101static int map_sip_addr(struct sk_buff *skb, unsigned int dataoff,
84 const char **dptr, unsigned int *datalen, 102 const char **dptr, unsigned int *datalen,
85 enum sip_header_types type) 103 enum sip_header_types type)
86{ 104{
@@ -93,16 +111,18 @@ static int map_sip_addr(struct sk_buff *skb,
93 if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, type, NULL, 111 if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, type, NULL,
94 &matchoff, &matchlen, &addr, &port) <= 0) 112 &matchoff, &matchlen, &addr, &port) <= 0)
95 return 1; 113 return 1;
96 return map_addr(skb, dptr, datalen, matchoff, matchlen, &addr, port); 114 return map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
115 &addr, port);
97} 116}
98 117
99static unsigned int ip_nat_sip(struct sk_buff *skb, 118static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
100 const char **dptr, unsigned int *datalen) 119 const char **dptr, unsigned int *datalen)
101{ 120{
102 enum ip_conntrack_info ctinfo; 121 enum ip_conntrack_info ctinfo;
103 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 122 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
104 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 123 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
105 unsigned int dataoff, matchoff, matchlen; 124 unsigned int coff, matchoff, matchlen;
125 enum sip_header_types hdr;
106 union nf_inet_addr addr; 126 union nf_inet_addr addr;
107 __be16 port; 127 __be16 port;
108 int request, in_header; 128 int request, in_header;
@@ -112,16 +132,21 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
112 if (ct_sip_parse_request(ct, *dptr, *datalen, 132 if (ct_sip_parse_request(ct, *dptr, *datalen,
113 &matchoff, &matchlen, 133 &matchoff, &matchlen,
114 &addr, &port) > 0 && 134 &addr, &port) > 0 &&
115 !map_addr(skb, dptr, datalen, matchoff, matchlen, 135 !map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
116 &addr, port)) 136 &addr, port))
117 return NF_DROP; 137 return NF_DROP;
118 request = 1; 138 request = 1;
119 } else 139 } else
120 request = 0; 140 request = 0;
121 141
142 if (nf_ct_protonum(ct) == IPPROTO_TCP)
143 hdr = SIP_HDR_VIA_TCP;
144 else
145 hdr = SIP_HDR_VIA_UDP;
146
122 /* Translate topmost Via header and parameters */ 147 /* Translate topmost Via header and parameters */
123 if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, 148 if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
124 SIP_HDR_VIA, NULL, &matchoff, &matchlen, 149 hdr, NULL, &matchoff, &matchlen,
125 &addr, &port) > 0) { 150 &addr, &port) > 0) {
126 unsigned int matchend, poff, plen, buflen, n; 151 unsigned int matchend, poff, plen, buflen, n;
127 char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; 152 char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
@@ -138,7 +163,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
138 goto next; 163 goto next;
139 } 164 }
140 165
141 if (!map_addr(skb, dptr, datalen, matchoff, matchlen, 166 if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
142 &addr, port)) 167 &addr, port))
143 return NF_DROP; 168 return NF_DROP;
144 169
@@ -153,8 +178,8 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
153 addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) { 178 addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) {
154 buflen = sprintf(buffer, "%pI4", 179 buflen = sprintf(buffer, "%pI4",
155 &ct->tuplehash[!dir].tuple.dst.u3.ip); 180 &ct->tuplehash[!dir].tuple.dst.u3.ip);
156 if (!mangle_packet(skb, dptr, datalen, poff, plen, 181 if (!mangle_packet(skb, dataoff, dptr, datalen,
157 buffer, buflen)) 182 poff, plen, buffer, buflen))
158 return NF_DROP; 183 return NF_DROP;
159 } 184 }
160 185
@@ -167,8 +192,8 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
167 addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) { 192 addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) {
168 buflen = sprintf(buffer, "%pI4", 193 buflen = sprintf(buffer, "%pI4",
169 &ct->tuplehash[!dir].tuple.src.u3.ip); 194 &ct->tuplehash[!dir].tuple.src.u3.ip);
170 if (!mangle_packet(skb, dptr, datalen, poff, plen, 195 if (!mangle_packet(skb, dataoff, dptr, datalen,
171 buffer, buflen)) 196 poff, plen, buffer, buflen))
172 return NF_DROP; 197 return NF_DROP;
173 } 198 }
174 199
@@ -181,31 +206,45 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
181 htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) { 206 htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) {
182 __be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port; 207 __be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port;
183 buflen = sprintf(buffer, "%u", ntohs(p)); 208 buflen = sprintf(buffer, "%u", ntohs(p));
184 if (!mangle_packet(skb, dptr, datalen, poff, plen, 209 if (!mangle_packet(skb, dataoff, dptr, datalen,
185 buffer, buflen)) 210 poff, plen, buffer, buflen))
186 return NF_DROP; 211 return NF_DROP;
187 } 212 }
188 } 213 }
189 214
190next: 215next:
191 /* Translate Contact headers */ 216 /* Translate Contact headers */
192 dataoff = 0; 217 coff = 0;
193 in_header = 0; 218 in_header = 0;
194 while (ct_sip_parse_header_uri(ct, *dptr, &dataoff, *datalen, 219 while (ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen,
195 SIP_HDR_CONTACT, &in_header, 220 SIP_HDR_CONTACT, &in_header,
196 &matchoff, &matchlen, 221 &matchoff, &matchlen,
197 &addr, &port) > 0) { 222 &addr, &port) > 0) {
198 if (!map_addr(skb, dptr, datalen, matchoff, matchlen, 223 if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
199 &addr, port)) 224 &addr, port))
200 return NF_DROP; 225 return NF_DROP;
201 } 226 }
202 227
203 if (!map_sip_addr(skb, dptr, datalen, SIP_HDR_FROM) || 228 if (!map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_FROM) ||
204 !map_sip_addr(skb, dptr, datalen, SIP_HDR_TO)) 229 !map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_TO))
205 return NF_DROP; 230 return NF_DROP;
231
206 return NF_ACCEPT; 232 return NF_ACCEPT;
207} 233}
208 234
235static void ip_nat_sip_seq_adjust(struct sk_buff *skb, s16 off)
236{
237 enum ip_conntrack_info ctinfo;
238 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
239 const struct tcphdr *th;
240
241 if (nf_ct_protonum(ct) != IPPROTO_TCP || off == 0)
242 return;
243
244 th = (struct tcphdr *)(skb->data + ip_hdrlen(skb));
245 nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off);
246}
247
209/* Handles expected signalling connections and media streams */ 248/* Handles expected signalling connections and media streams */
210static void ip_nat_sip_expected(struct nf_conn *ct, 249static void ip_nat_sip_expected(struct nf_conn *ct,
211 struct nf_conntrack_expect *exp) 250 struct nf_conntrack_expect *exp)
@@ -232,7 +271,7 @@ static void ip_nat_sip_expected(struct nf_conn *ct,
232 } 271 }
233} 272}
234 273
235static unsigned int ip_nat_sip_expect(struct sk_buff *skb, 274static unsigned int ip_nat_sip_expect(struct sk_buff *skb, unsigned int dataoff,
236 const char **dptr, unsigned int *datalen, 275 const char **dptr, unsigned int *datalen,
237 struct nf_conntrack_expect *exp, 276 struct nf_conntrack_expect *exp,
238 unsigned int matchoff, 277 unsigned int matchoff,
@@ -279,8 +318,8 @@ static unsigned int ip_nat_sip_expect(struct sk_buff *skb,
279 if (exp->tuple.dst.u3.ip != exp->saved_ip || 318 if (exp->tuple.dst.u3.ip != exp->saved_ip ||
280 exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) { 319 exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) {
281 buflen = sprintf(buffer, "%pI4:%u", &newip, port); 320 buflen = sprintf(buffer, "%pI4:%u", &newip, port);
282 if (!mangle_packet(skb, dptr, datalen, matchoff, matchlen, 321 if (!mangle_packet(skb, dataoff, dptr, datalen,
283 buffer, buflen)) 322 matchoff, matchlen, buffer, buflen))
284 goto err; 323 goto err;
285 } 324 }
286 return NF_ACCEPT; 325 return NF_ACCEPT;
@@ -290,7 +329,7 @@ err:
290 return NF_DROP; 329 return NF_DROP;
291} 330}
292 331
293static int mangle_content_len(struct sk_buff *skb, 332static int mangle_content_len(struct sk_buff *skb, unsigned int dataoff,
294 const char **dptr, unsigned int *datalen) 333 const char **dptr, unsigned int *datalen)
295{ 334{
296 enum ip_conntrack_info ctinfo; 335 enum ip_conntrack_info ctinfo;
@@ -312,12 +351,13 @@ static int mangle_content_len(struct sk_buff *skb,
312 return 0; 351 return 0;
313 352
314 buflen = sprintf(buffer, "%u", c_len); 353 buflen = sprintf(buffer, "%u", c_len);
315 return mangle_packet(skb, dptr, datalen, matchoff, matchlen, 354 return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
316 buffer, buflen); 355 buffer, buflen);
317} 356}
318 357
319static int mangle_sdp_packet(struct sk_buff *skb, const char **dptr, 358static int mangle_sdp_packet(struct sk_buff *skb, unsigned int dataoff,
320 unsigned int dataoff, unsigned int *datalen, 359 const char **dptr, unsigned int *datalen,
360 unsigned int sdpoff,
321 enum sdp_header_types type, 361 enum sdp_header_types type,
322 enum sdp_header_types term, 362 enum sdp_header_types term,
323 char *buffer, int buflen) 363 char *buffer, int buflen)
@@ -326,16 +366,16 @@ static int mangle_sdp_packet(struct sk_buff *skb, const char **dptr,
326 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 366 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
327 unsigned int matchlen, matchoff; 367 unsigned int matchlen, matchoff;
328 368
329 if (ct_sip_get_sdp_header(ct, *dptr, dataoff, *datalen, type, term, 369 if (ct_sip_get_sdp_header(ct, *dptr, sdpoff, *datalen, type, term,
330 &matchoff, &matchlen) <= 0) 370 &matchoff, &matchlen) <= 0)
331 return -ENOENT; 371 return -ENOENT;
332 return mangle_packet(skb, dptr, datalen, matchoff, matchlen, 372 return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
333 buffer, buflen) ? 0 : -EINVAL; 373 buffer, buflen) ? 0 : -EINVAL;
334} 374}
335 375
336static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, const char **dptr, 376static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, unsigned int dataoff,
337 unsigned int dataoff, 377 const char **dptr, unsigned int *datalen,
338 unsigned int *datalen, 378 unsigned int sdpoff,
339 enum sdp_header_types type, 379 enum sdp_header_types type,
340 enum sdp_header_types term, 380 enum sdp_header_types term,
341 const union nf_inet_addr *addr) 381 const union nf_inet_addr *addr)
@@ -344,16 +384,15 @@ static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, const char **dptr,
344 unsigned int buflen; 384 unsigned int buflen;
345 385
346 buflen = sprintf(buffer, "%pI4", &addr->ip); 386 buflen = sprintf(buffer, "%pI4", &addr->ip);
347 if (mangle_sdp_packet(skb, dptr, dataoff, datalen, type, term, 387 if (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff, type, term,
348 buffer, buflen)) 388 buffer, buflen))
349 return 0; 389 return 0;
350 390
351 return mangle_content_len(skb, dptr, datalen); 391 return mangle_content_len(skb, dataoff, dptr, datalen);
352} 392}
353 393
354static unsigned int ip_nat_sdp_port(struct sk_buff *skb, 394static unsigned int ip_nat_sdp_port(struct sk_buff *skb, unsigned int dataoff,
355 const char **dptr, 395 const char **dptr, unsigned int *datalen,
356 unsigned int *datalen,
357 unsigned int matchoff, 396 unsigned int matchoff,
358 unsigned int matchlen, 397 unsigned int matchlen,
359 u_int16_t port) 398 u_int16_t port)
@@ -362,16 +401,16 @@ static unsigned int ip_nat_sdp_port(struct sk_buff *skb,
362 unsigned int buflen; 401 unsigned int buflen;
363 402
364 buflen = sprintf(buffer, "%u", port); 403 buflen = sprintf(buffer, "%u", port);
365 if (!mangle_packet(skb, dptr, datalen, matchoff, matchlen, 404 if (!mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
366 buffer, buflen)) 405 buffer, buflen))
367 return 0; 406 return 0;
368 407
369 return mangle_content_len(skb, dptr, datalen); 408 return mangle_content_len(skb, dataoff, dptr, datalen);
370} 409}
371 410
372static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr, 411static unsigned int ip_nat_sdp_session(struct sk_buff *skb, unsigned int dataoff,
373 unsigned int dataoff, 412 const char **dptr, unsigned int *datalen,
374 unsigned int *datalen, 413 unsigned int sdpoff,
375 const union nf_inet_addr *addr) 414 const union nf_inet_addr *addr)
376{ 415{
377 char buffer[sizeof("nnn.nnn.nnn.nnn")]; 416 char buffer[sizeof("nnn.nnn.nnn.nnn")];
@@ -379,12 +418,12 @@ static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr,
379 418
380 /* Mangle session description owner and contact addresses */ 419 /* Mangle session description owner and contact addresses */
381 buflen = sprintf(buffer, "%pI4", &addr->ip); 420 buflen = sprintf(buffer, "%pI4", &addr->ip);
382 if (mangle_sdp_packet(skb, dptr, dataoff, datalen, 421 if (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff,
383 SDP_HDR_OWNER_IP4, SDP_HDR_MEDIA, 422 SDP_HDR_OWNER_IP4, SDP_HDR_MEDIA,
384 buffer, buflen)) 423 buffer, buflen))
385 return 0; 424 return 0;
386 425
387 switch (mangle_sdp_packet(skb, dptr, dataoff, datalen, 426 switch (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff,
388 SDP_HDR_CONNECTION_IP4, SDP_HDR_MEDIA, 427 SDP_HDR_CONNECTION_IP4, SDP_HDR_MEDIA,
389 buffer, buflen)) { 428 buffer, buflen)) {
390 case 0: 429 case 0:
@@ -401,14 +440,13 @@ static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr,
401 return 0; 440 return 0;
402 } 441 }
403 442
404 return mangle_content_len(skb, dptr, datalen); 443 return mangle_content_len(skb, dataoff, dptr, datalen);
405} 444}
406 445
407/* So, this packet has hit the connection tracking matching code. 446/* So, this packet has hit the connection tracking matching code.
408 Mangle it, and change the expectation to match the new version. */ 447 Mangle it, and change the expectation to match the new version. */
409static unsigned int ip_nat_sdp_media(struct sk_buff *skb, 448static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
410 const char **dptr, 449 const char **dptr, unsigned int *datalen,
411 unsigned int *datalen,
412 struct nf_conntrack_expect *rtp_exp, 450 struct nf_conntrack_expect *rtp_exp,
413 struct nf_conntrack_expect *rtcp_exp, 451 struct nf_conntrack_expect *rtcp_exp,
414 unsigned int mediaoff, 452 unsigned int mediaoff,
@@ -456,7 +494,8 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb,
456 494
457 /* Update media port. */ 495 /* Update media port. */
458 if (rtp_exp->tuple.dst.u.udp.port != rtp_exp->saved_proto.udp.port && 496 if (rtp_exp->tuple.dst.u.udp.port != rtp_exp->saved_proto.udp.port &&
459 !ip_nat_sdp_port(skb, dptr, datalen, mediaoff, medialen, port)) 497 !ip_nat_sdp_port(skb, dataoff, dptr, datalen,
498 mediaoff, medialen, port))
460 goto err2; 499 goto err2;
461 500
462 return NF_ACCEPT; 501 return NF_ACCEPT;
@@ -471,6 +510,7 @@ err1:
471static void __exit nf_nat_sip_fini(void) 510static void __exit nf_nat_sip_fini(void)
472{ 511{
473 rcu_assign_pointer(nf_nat_sip_hook, NULL); 512 rcu_assign_pointer(nf_nat_sip_hook, NULL);
513 rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, NULL);
474 rcu_assign_pointer(nf_nat_sip_expect_hook, NULL); 514 rcu_assign_pointer(nf_nat_sip_expect_hook, NULL);
475 rcu_assign_pointer(nf_nat_sdp_addr_hook, NULL); 515 rcu_assign_pointer(nf_nat_sdp_addr_hook, NULL);
476 rcu_assign_pointer(nf_nat_sdp_port_hook, NULL); 516 rcu_assign_pointer(nf_nat_sdp_port_hook, NULL);
@@ -482,12 +522,14 @@ static void __exit nf_nat_sip_fini(void)
482static int __init nf_nat_sip_init(void) 522static int __init nf_nat_sip_init(void)
483{ 523{
484 BUG_ON(nf_nat_sip_hook != NULL); 524 BUG_ON(nf_nat_sip_hook != NULL);
525 BUG_ON(nf_nat_sip_seq_adjust_hook != NULL);
485 BUG_ON(nf_nat_sip_expect_hook != NULL); 526 BUG_ON(nf_nat_sip_expect_hook != NULL);
486 BUG_ON(nf_nat_sdp_addr_hook != NULL); 527 BUG_ON(nf_nat_sdp_addr_hook != NULL);
487 BUG_ON(nf_nat_sdp_port_hook != NULL); 528 BUG_ON(nf_nat_sdp_port_hook != NULL);
488 BUG_ON(nf_nat_sdp_session_hook != NULL); 529 BUG_ON(nf_nat_sdp_session_hook != NULL);
489 BUG_ON(nf_nat_sdp_media_hook != NULL); 530 BUG_ON(nf_nat_sdp_media_hook != NULL);
490 rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip); 531 rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip);
532 rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust);
491 rcu_assign_pointer(nf_nat_sip_expect_hook, ip_nat_sip_expect); 533 rcu_assign_pointer(nf_nat_sip_expect_hook, ip_nat_sip_expect);
492 rcu_assign_pointer(nf_nat_sdp_addr_hook, ip_nat_sdp_addr); 534 rcu_assign_pointer(nf_nat_sdp_addr_hook, ip_nat_sdp_addr);
493 rcu_assign_pointer(nf_nat_sdp_port_hook, ip_nat_sdp_port); 535 rcu_assign_pointer(nf_nat_sdp_port_hook, ip_nat_sdp_port);
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index d9521f6f9ed0..0b9c7ce3d6c5 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1038,7 +1038,7 @@ static int snmp_parse_mangle(unsigned char *msg,
1038 unsigned int cls, con, tag, vers, pdutype; 1038 unsigned int cls, con, tag, vers, pdutype;
1039 struct asn1_ctx ctx; 1039 struct asn1_ctx ctx;
1040 struct asn1_octstr comm; 1040 struct asn1_octstr comm;
1041 struct snmp_object **obj; 1041 struct snmp_object *obj;
1042 1042
1043 if (debug > 1) 1043 if (debug > 1)
1044 hex_dump(msg, len); 1044 hex_dump(msg, len);
@@ -1148,43 +1148,34 @@ static int snmp_parse_mangle(unsigned char *msg,
1148 if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) 1148 if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
1149 return 0; 1149 return 0;
1150 1150
1151 obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC);
1152 if (obj == NULL) {
1153 if (net_ratelimit())
1154 printk(KERN_WARNING "OOM in bsalg(%d)\n", __LINE__);
1155 return 0;
1156 }
1157
1158 while (!asn1_eoc_decode(&ctx, eoc)) { 1151 while (!asn1_eoc_decode(&ctx, eoc)) {
1159 unsigned int i; 1152 unsigned int i;
1160 1153
1161 if (!snmp_object_decode(&ctx, obj)) { 1154 if (!snmp_object_decode(&ctx, &obj)) {
1162 if (*obj) { 1155 if (obj) {
1163 kfree((*obj)->id); 1156 kfree(obj->id);
1164 kfree(*obj); 1157 kfree(obj);
1165 } 1158 }
1166 kfree(obj);
1167 return 0; 1159 return 0;
1168 } 1160 }
1169 1161
1170 if (debug > 1) { 1162 if (debug > 1) {
1171 printk(KERN_DEBUG "bsalg: object: "); 1163 printk(KERN_DEBUG "bsalg: object: ");
1172 for (i = 0; i < (*obj)->id_len; i++) { 1164 for (i = 0; i < obj->id_len; i++) {
1173 if (i > 0) 1165 if (i > 0)
1174 printk("."); 1166 printk(".");
1175 printk("%lu", (*obj)->id[i]); 1167 printk("%lu", obj->id[i]);
1176 } 1168 }
1177 printk(": type=%u\n", (*obj)->type); 1169 printk(": type=%u\n", obj->type);
1178 1170
1179 } 1171 }
1180 1172
1181 if ((*obj)->type == SNMP_IPADDR) 1173 if (obj->type == SNMP_IPADDR)
1182 mangle_address(ctx.begin, ctx.pointer - 4 , map, check); 1174 mangle_address(ctx.begin, ctx.pointer - 4 , map, check);
1183 1175
1184 kfree((*obj)->id); 1176 kfree(obj->id);
1185 kfree(*obj); 1177 kfree(obj);
1186 } 1178 }
1187 kfree(obj);
1188 1179
1189 if (!asn1_eoc_decode(&ctx, eoc)) 1180 if (!asn1_eoc_decode(&ctx, eoc))
1190 return 0; 1181 return 0;
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index f25542c48b7d..4f1f337f4337 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -127,8 +127,8 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
127 SNMP_MIB_SENTINEL 127 SNMP_MIB_SENTINEL
128}; 128};
129 129
130static struct { 130static const struct {
131 char *name; 131 const char *name;
132 int index; 132 int index;
133} icmpmibmap[] = { 133} icmpmibmap[] = {
134 { "DestUnreachs", ICMP_DEST_UNREACH }, 134 { "DestUnreachs", ICMP_DEST_UNREACH },
@@ -249,6 +249,8 @@ static const struct snmp_mib snmp4_net_list[] = {
249 SNMP_MIB_ITEM("TCPSackShifted", LINUX_MIB_SACKSHIFTED), 249 SNMP_MIB_ITEM("TCPSackShifted", LINUX_MIB_SACKSHIFTED),
250 SNMP_MIB_ITEM("TCPSackMerged", LINUX_MIB_SACKMERGED), 250 SNMP_MIB_ITEM("TCPSackMerged", LINUX_MIB_SACKMERGED),
251 SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK), 251 SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK),
252 SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP),
253 SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
252 SNMP_MIB_SENTINEL 254 SNMP_MIB_SENTINEL
253}; 255};
254 256
@@ -280,7 +282,7 @@ static void icmpmsg_put(struct seq_file *seq)
280 282
281 count = 0; 283 count = 0;
282 for (i = 0; i < ICMPMSG_MIB_MAX; i++) { 284 for (i = 0; i < ICMPMSG_MIB_MAX; i++) {
283 val = snmp_fold_field((void **) net->mib.icmpmsg_statistics, i); 285 val = snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics, i);
284 if (val) { 286 if (val) {
285 type[count] = i; 287 type[count] = i;
286 vals[count++] = val; 288 vals[count++] = val;
@@ -307,18 +309,18 @@ static void icmp_put(struct seq_file *seq)
307 for (i=0; icmpmibmap[i].name != NULL; i++) 309 for (i=0; icmpmibmap[i].name != NULL; i++)
308 seq_printf(seq, " Out%s", icmpmibmap[i].name); 310 seq_printf(seq, " Out%s", icmpmibmap[i].name);
309 seq_printf(seq, "\nIcmp: %lu %lu", 311 seq_printf(seq, "\nIcmp: %lu %lu",
310 snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_INMSGS), 312 snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INMSGS),
311 snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_INERRORS)); 313 snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS));
312 for (i=0; icmpmibmap[i].name != NULL; i++) 314 for (i=0; icmpmibmap[i].name != NULL; i++)
313 seq_printf(seq, " %lu", 315 seq_printf(seq, " %lu",
314 snmp_fold_field((void **) net->mib.icmpmsg_statistics, 316 snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
315 icmpmibmap[i].index)); 317 icmpmibmap[i].index));
316 seq_printf(seq, " %lu %lu", 318 seq_printf(seq, " %lu %lu",
317 snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS), 319 snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
318 snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS)); 320 snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
319 for (i=0; icmpmibmap[i].name != NULL; i++) 321 for (i=0; icmpmibmap[i].name != NULL; i++)
320 seq_printf(seq, " %lu", 322 seq_printf(seq, " %lu",
321 snmp_fold_field((void **) net->mib.icmpmsg_statistics, 323 snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
322 icmpmibmap[i].index | 0x100)); 324 icmpmibmap[i].index | 0x100));
323} 325}
324 326
@@ -341,7 +343,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
341 343
342 for (i = 0; snmp4_ipstats_list[i].name != NULL; i++) 344 for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
343 seq_printf(seq, " %lu", 345 seq_printf(seq, " %lu",
344 snmp_fold_field((void **)net->mib.ip_statistics, 346 snmp_fold_field((void __percpu **)net->mib.ip_statistics,
345 snmp4_ipstats_list[i].entry)); 347 snmp4_ipstats_list[i].entry));
346 348
347 icmp_put(seq); /* RFC 2011 compatibility */ 349 icmp_put(seq); /* RFC 2011 compatibility */
@@ -356,11 +358,11 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
356 /* MaxConn field is signed, RFC 2012 */ 358 /* MaxConn field is signed, RFC 2012 */
357 if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN) 359 if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN)
358 seq_printf(seq, " %ld", 360 seq_printf(seq, " %ld",
359 snmp_fold_field((void **)net->mib.tcp_statistics, 361 snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
360 snmp4_tcp_list[i].entry)); 362 snmp4_tcp_list[i].entry));
361 else 363 else
362 seq_printf(seq, " %lu", 364 seq_printf(seq, " %lu",
363 snmp_fold_field((void **)net->mib.tcp_statistics, 365 snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
364 snmp4_tcp_list[i].entry)); 366 snmp4_tcp_list[i].entry));
365 } 367 }
366 368
@@ -371,7 +373,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
371 seq_puts(seq, "\nUdp:"); 373 seq_puts(seq, "\nUdp:");
372 for (i = 0; snmp4_udp_list[i].name != NULL; i++) 374 for (i = 0; snmp4_udp_list[i].name != NULL; i++)
373 seq_printf(seq, " %lu", 375 seq_printf(seq, " %lu",
374 snmp_fold_field((void **)net->mib.udp_statistics, 376 snmp_fold_field((void __percpu **)net->mib.udp_statistics,
375 snmp4_udp_list[i].entry)); 377 snmp4_udp_list[i].entry));
376 378
377 /* the UDP and UDP-Lite MIBs are the same */ 379 /* the UDP and UDP-Lite MIBs are the same */
@@ -382,7 +384,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
382 seq_puts(seq, "\nUdpLite:"); 384 seq_puts(seq, "\nUdpLite:");
383 for (i = 0; snmp4_udp_list[i].name != NULL; i++) 385 for (i = 0; snmp4_udp_list[i].name != NULL; i++)
384 seq_printf(seq, " %lu", 386 seq_printf(seq, " %lu",
385 snmp_fold_field((void **)net->mib.udplite_statistics, 387 snmp_fold_field((void __percpu **)net->mib.udplite_statistics,
386 snmp4_udp_list[i].entry)); 388 snmp4_udp_list[i].entry));
387 389
388 seq_putc(seq, '\n'); 390 seq_putc(seq, '\n');
@@ -419,7 +421,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
419 seq_puts(seq, "\nTcpExt:"); 421 seq_puts(seq, "\nTcpExt:");
420 for (i = 0; snmp4_net_list[i].name != NULL; i++) 422 for (i = 0; snmp4_net_list[i].name != NULL; i++)
421 seq_printf(seq, " %lu", 423 seq_printf(seq, " %lu",
422 snmp_fold_field((void **)net->mib.net_statistics, 424 snmp_fold_field((void __percpu **)net->mib.net_statistics,
423 snmp4_net_list[i].entry)); 425 snmp4_net_list[i].entry));
424 426
425 seq_puts(seq, "\nIpExt:"); 427 seq_puts(seq, "\nIpExt:");
@@ -429,7 +431,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
429 seq_puts(seq, "\nIpExt:"); 431 seq_puts(seq, "\nIpExt:");
430 for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++) 432 for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++)
431 seq_printf(seq, " %lu", 433 seq_printf(seq, " %lu",
432 snmp_fold_field((void **)net->mib.ip_statistics, 434 snmp_fold_field((void __percpu **)net->mib.ip_statistics,
433 snmp4_ipextstats_list[i].entry)); 435 snmp4_ipextstats_list[i].entry));
434 436
435 seq_putc(seq, '\n'); 437 seq_putc(seq, '\n');
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e446496f564f..d413b57be9b3 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -146,7 +146,6 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
146static void ipv4_link_failure(struct sk_buff *skb); 146static void ipv4_link_failure(struct sk_buff *skb);
147static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu); 147static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
148static int rt_garbage_collect(struct dst_ops *ops); 148static int rt_garbage_collect(struct dst_ops *ops);
149static void rt_emergency_hash_rebuild(struct net *net);
150 149
151 150
152static struct dst_ops ipv4_dst_ops = { 151static struct dst_ops ipv4_dst_ops = {
@@ -287,12 +286,12 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
287 if (!rt_hash_table[st->bucket].chain) 286 if (!rt_hash_table[st->bucket].chain)
288 continue; 287 continue;
289 rcu_read_lock_bh(); 288 rcu_read_lock_bh();
290 r = rcu_dereference(rt_hash_table[st->bucket].chain); 289 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
291 while (r) { 290 while (r) {
292 if (dev_net(r->u.dst.dev) == seq_file_net(seq) && 291 if (dev_net(r->u.dst.dev) == seq_file_net(seq) &&
293 r->rt_genid == st->genid) 292 r->rt_genid == st->genid)
294 return r; 293 return r;
295 r = rcu_dereference(r->u.dst.rt_next); 294 r = rcu_dereference_bh(r->u.dst.rt_next);
296 } 295 }
297 rcu_read_unlock_bh(); 296 rcu_read_unlock_bh();
298 } 297 }
@@ -314,7 +313,7 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
314 rcu_read_lock_bh(); 313 rcu_read_lock_bh();
315 r = rt_hash_table[st->bucket].chain; 314 r = rt_hash_table[st->bucket].chain;
316 } 315 }
317 return rcu_dereference(r); 316 return rcu_dereference_bh(r);
318} 317}
319 318
320static struct rtable *rt_cache_get_next(struct seq_file *seq, 319static struct rtable *rt_cache_get_next(struct seq_file *seq,
@@ -586,7 +585,9 @@ static void __net_exit ip_rt_do_proc_exit(struct net *net)
586{ 585{
587 remove_proc_entry("rt_cache", net->proc_net_stat); 586 remove_proc_entry("rt_cache", net->proc_net_stat);
588 remove_proc_entry("rt_cache", net->proc_net); 587 remove_proc_entry("rt_cache", net->proc_net);
588#ifdef CONFIG_NET_CLS_ROUTE
589 remove_proc_entry("rt_acct", net->proc_net); 589 remove_proc_entry("rt_acct", net->proc_net);
590#endif
590} 591}
591 592
592static struct pernet_operations ip_rt_proc_ops __net_initdata = { 593static struct pernet_operations ip_rt_proc_ops __net_initdata = {
@@ -778,11 +779,30 @@ static void rt_do_flush(int process_context)
778#define FRACT_BITS 3 779#define FRACT_BITS 3
779#define ONE (1UL << FRACT_BITS) 780#define ONE (1UL << FRACT_BITS)
780 781
782/*
783 * Given a hash chain and an item in this hash chain,
784 * find if a previous entry has the same hash_inputs
785 * (but differs on tos, mark or oif)
786 * Returns 0 if an alias is found.
787 * Returns ONE if rth has no alias before itself.
788 */
789static int has_noalias(const struct rtable *head, const struct rtable *rth)
790{
791 const struct rtable *aux = head;
792
793 while (aux != rth) {
794 if (compare_hash_inputs(&aux->fl, &rth->fl))
795 return 0;
796 aux = aux->u.dst.rt_next;
797 }
798 return ONE;
799}
800
781static void rt_check_expire(void) 801static void rt_check_expire(void)
782{ 802{
783 static unsigned int rover; 803 static unsigned int rover;
784 unsigned int i = rover, goal; 804 unsigned int i = rover, goal;
785 struct rtable *rth, *aux, **rthp; 805 struct rtable *rth, **rthp;
786 unsigned long samples = 0; 806 unsigned long samples = 0;
787 unsigned long sum = 0, sum2 = 0; 807 unsigned long sum = 0, sum2 = 0;
788 unsigned long delta; 808 unsigned long delta;
@@ -833,15 +853,7 @@ nofree:
833 * attributes don't unfairly skew 853 * attributes don't unfairly skew
834 * the length computation 854 * the length computation
835 */ 855 */
836 for (aux = rt_hash_table[i].chain;;) { 856 length += has_noalias(rt_hash_table[i].chain, rth);
837 if (aux == rth) {
838 length += ONE;
839 break;
840 }
841 if (compare_hash_inputs(&aux->fl, &rth->fl))
842 break;
843 aux = aux->u.dst.rt_next;
844 }
845 continue; 857 continue;
846 } 858 }
847 } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) 859 } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
@@ -920,10 +932,8 @@ static void rt_secret_rebuild_oneshot(struct net *net)
920{ 932{
921 del_timer_sync(&net->ipv4.rt_secret_timer); 933 del_timer_sync(&net->ipv4.rt_secret_timer);
922 rt_cache_invalidate(net); 934 rt_cache_invalidate(net);
923 if (ip_rt_secret_interval) { 935 if (ip_rt_secret_interval)
924 net->ipv4.rt_secret_timer.expires += ip_rt_secret_interval; 936 mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
925 add_timer(&net->ipv4.rt_secret_timer);
926 }
927} 937}
928 938
929static void rt_emergency_hash_rebuild(struct net *net) 939static void rt_emergency_hash_rebuild(struct net *net)
@@ -1071,8 +1081,23 @@ work_done:
1071out: return 0; 1081out: return 0;
1072} 1082}
1073 1083
1084/*
1085 * Returns number of entries in a hash chain that have different hash_inputs
1086 */
1087static int slow_chain_length(const struct rtable *head)
1088{
1089 int length = 0;
1090 const struct rtable *rth = head;
1091
1092 while (rth) {
1093 length += has_noalias(head, rth);
1094 rth = rth->u.dst.rt_next;
1095 }
1096 return length >> FRACT_BITS;
1097}
1098
1074static int rt_intern_hash(unsigned hash, struct rtable *rt, 1099static int rt_intern_hash(unsigned hash, struct rtable *rt,
1075 struct rtable **rp, struct sk_buff *skb) 1100 struct rtable **rp, struct sk_buff *skb, int ifindex)
1076{ 1101{
1077 struct rtable *rth, **rthp; 1102 struct rtable *rth, **rthp;
1078 unsigned long now; 1103 unsigned long now;
@@ -1183,14 +1208,20 @@ restart:
1183 rt_free(cand); 1208 rt_free(cand);
1184 } 1209 }
1185 } else { 1210 } else {
1186 if (chain_length > rt_chain_length_max) { 1211 if (chain_length > rt_chain_length_max &&
1212 slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
1187 struct net *net = dev_net(rt->u.dst.dev); 1213 struct net *net = dev_net(rt->u.dst.dev);
1188 int num = ++net->ipv4.current_rt_cache_rebuild_count; 1214 int num = ++net->ipv4.current_rt_cache_rebuild_count;
1189 if (!rt_caching(dev_net(rt->u.dst.dev))) { 1215 if (!rt_caching(net)) {
1190 printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n", 1216 printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
1191 rt->u.dst.dev->name, num); 1217 rt->u.dst.dev->name, num);
1192 } 1218 }
1193 rt_emergency_hash_rebuild(dev_net(rt->u.dst.dev)); 1219 rt_emergency_hash_rebuild(net);
1220 spin_unlock_bh(rt_hash_lock_addr(hash));
1221
1222 hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
1223 ifindex, rt_genid(net));
1224 goto restart;
1194 } 1225 }
1195 } 1226 }
1196 1227
@@ -1415,7 +1446,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1415 dev_hold(rt->u.dst.dev); 1446 dev_hold(rt->u.dst.dev);
1416 if (rt->idev) 1447 if (rt->idev)
1417 in_dev_hold(rt->idev); 1448 in_dev_hold(rt->idev);
1418 rt->u.dst.obsolete = 0; 1449 rt->u.dst.obsolete = -1;
1419 rt->u.dst.lastuse = jiffies; 1450 rt->u.dst.lastuse = jiffies;
1420 rt->u.dst.path = &rt->u.dst; 1451 rt->u.dst.path = &rt->u.dst;
1421 rt->u.dst.neighbour = NULL; 1452 rt->u.dst.neighbour = NULL;
@@ -1451,7 +1482,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1451 &netevent); 1482 &netevent);
1452 1483
1453 rt_del(hash, rth); 1484 rt_del(hash, rth);
1454 if (!rt_intern_hash(hash, rt, &rt, NULL)) 1485 if (!rt_intern_hash(hash, rt, &rt, NULL, rt->fl.oif))
1455 ip_rt_put(rt); 1486 ip_rt_put(rt);
1456 goto do_next; 1487 goto do_next;
1457 } 1488 }
@@ -1480,11 +1511,12 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1480 struct dst_entry *ret = dst; 1511 struct dst_entry *ret = dst;
1481 1512
1482 if (rt) { 1513 if (rt) {
1483 if (dst->obsolete) { 1514 if (dst->obsolete > 0) {
1484 ip_rt_put(rt); 1515 ip_rt_put(rt);
1485 ret = NULL; 1516 ret = NULL;
1486 } else if ((rt->rt_flags & RTCF_REDIRECTED) || 1517 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
1487 rt->u.dst.expires) { 1518 (rt->u.dst.expires &&
1519 time_after_eq(jiffies, rt->u.dst.expires))) {
1488 unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, 1520 unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
1489 rt->fl.oif, 1521 rt->fl.oif,
1490 rt_genid(dev_net(dst->dev))); 1522 rt_genid(dev_net(dst->dev)));
@@ -1700,7 +1732,9 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1700 1732
1701static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) 1733static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1702{ 1734{
1703 return NULL; 1735 if (rt_is_expired((struct rtable *)dst))
1736 return NULL;
1737 return dst;
1704} 1738}
1705 1739
1706static void ipv4_dst_destroy(struct dst_entry *dst) 1740static void ipv4_dst_destroy(struct dst_entry *dst)
@@ -1862,7 +1896,8 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1862 if (!rth) 1896 if (!rth)
1863 goto e_nobufs; 1897 goto e_nobufs;
1864 1898
1865 rth->u.dst.output= ip_rt_bug; 1899 rth->u.dst.output = ip_rt_bug;
1900 rth->u.dst.obsolete = -1;
1866 1901
1867 atomic_set(&rth->u.dst.__refcnt, 1); 1902 atomic_set(&rth->u.dst.__refcnt, 1);
1868 rth->u.dst.flags= DST_HOST; 1903 rth->u.dst.flags= DST_HOST;
@@ -1901,7 +1936,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1901 1936
1902 in_dev_put(in_dev); 1937 in_dev_put(in_dev);
1903 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); 1938 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
1904 return rt_intern_hash(hash, rth, NULL, skb); 1939 return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex);
1905 1940
1906e_nobufs: 1941e_nobufs:
1907 in_dev_put(in_dev); 1942 in_dev_put(in_dev);
@@ -1988,8 +2023,13 @@ static int __mkroute_input(struct sk_buff *skb,
1988 if (skb->protocol != htons(ETH_P_IP)) { 2023 if (skb->protocol != htons(ETH_P_IP)) {
1989 /* Not IP (i.e. ARP). Do not create route, if it is 2024 /* Not IP (i.e. ARP). Do not create route, if it is
1990 * invalid for proxy arp. DNAT routes are always valid. 2025 * invalid for proxy arp. DNAT routes are always valid.
2026 *
2027 * Proxy arp feature have been extended to allow, ARP
2028 * replies back to the same interface, to support
2029 * Private VLAN switch technologies. See arp.c.
1991 */ 2030 */
1992 if (out_dev == in_dev) { 2031 if (out_dev == in_dev &&
2032 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1993 err = -EINVAL; 2033 err = -EINVAL;
1994 goto cleanup; 2034 goto cleanup;
1995 } 2035 }
@@ -2023,6 +2063,7 @@ static int __mkroute_input(struct sk_buff *skb,
2023 rth->fl.oif = 0; 2063 rth->fl.oif = 0;
2024 rth->rt_spec_dst= spec_dst; 2064 rth->rt_spec_dst= spec_dst;
2025 2065
2066 rth->u.dst.obsolete = -1;
2026 rth->u.dst.input = ip_forward; 2067 rth->u.dst.input = ip_forward;
2027 rth->u.dst.output = ip_output; 2068 rth->u.dst.output = ip_output;
2028 rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev)); 2069 rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev));
@@ -2062,7 +2103,7 @@ static int ip_mkroute_input(struct sk_buff *skb,
2062 /* put it into the cache */ 2103 /* put it into the cache */
2063 hash = rt_hash(daddr, saddr, fl->iif, 2104 hash = rt_hash(daddr, saddr, fl->iif,
2064 rt_genid(dev_net(rth->u.dst.dev))); 2105 rt_genid(dev_net(rth->u.dst.dev)));
2065 return rt_intern_hash(hash, rth, NULL, skb); 2106 return rt_intern_hash(hash, rth, NULL, skb, fl->iif);
2066} 2107}
2067 2108
2068/* 2109/*
@@ -2187,6 +2228,7 @@ local_input:
2187 goto e_nobufs; 2228 goto e_nobufs;
2188 2229
2189 rth->u.dst.output= ip_rt_bug; 2230 rth->u.dst.output= ip_rt_bug;
2231 rth->u.dst.obsolete = -1;
2190 rth->rt_genid = rt_genid(net); 2232 rth->rt_genid = rt_genid(net);
2191 2233
2192 atomic_set(&rth->u.dst.__refcnt, 1); 2234 atomic_set(&rth->u.dst.__refcnt, 1);
@@ -2218,7 +2260,7 @@ local_input:
2218 } 2260 }
2219 rth->rt_type = res.type; 2261 rth->rt_type = res.type;
2220 hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net)); 2262 hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
2221 err = rt_intern_hash(hash, rth, NULL, skb); 2263 err = rt_intern_hash(hash, rth, NULL, skb, fl.iif);
2222 goto done; 2264 goto done;
2223 2265
2224no_route: 2266no_route:
@@ -2413,6 +2455,7 @@ static int __mkroute_output(struct rtable **result,
2413 rth->rt_spec_dst= fl->fl4_src; 2455 rth->rt_spec_dst= fl->fl4_src;
2414 2456
2415 rth->u.dst.output=ip_output; 2457 rth->u.dst.output=ip_output;
2458 rth->u.dst.obsolete = -1;
2416 rth->rt_genid = rt_genid(dev_net(dev_out)); 2459 rth->rt_genid = rt_genid(dev_net(dev_out));
2417 2460
2418 RT_CACHE_STAT_INC(out_slow_tot); 2461 RT_CACHE_STAT_INC(out_slow_tot);
@@ -2464,7 +2507,7 @@ static int ip_mkroute_output(struct rtable **rp,
2464 if (err == 0) { 2507 if (err == 0) {
2465 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif, 2508 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
2466 rt_genid(dev_net(dev_out))); 2509 rt_genid(dev_net(dev_out)));
2467 err = rt_intern_hash(hash, rth, rp, NULL); 2510 err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif);
2468 } 2511 }
2469 2512
2470 return err; 2513 return err;
@@ -2687,8 +2730,8 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
2687 hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net)); 2730 hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));
2688 2731
2689 rcu_read_lock_bh(); 2732 rcu_read_lock_bh();
2690 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 2733 for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
2691 rth = rcu_dereference(rth->u.dst.rt_next)) { 2734 rth = rcu_dereference_bh(rth->u.dst.rt_next)) {
2692 if (rth->fl.fl4_dst == flp->fl4_dst && 2735 if (rth->fl.fl4_dst == flp->fl4_dst &&
2693 rth->fl.fl4_src == flp->fl4_src && 2736 rth->fl.fl4_src == flp->fl4_src &&
2694 rth->fl.iif == 0 && 2737 rth->fl.iif == 0 &&
@@ -3006,8 +3049,8 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
3006 if (!rt_hash_table[h].chain) 3049 if (!rt_hash_table[h].chain)
3007 continue; 3050 continue;
3008 rcu_read_lock_bh(); 3051 rcu_read_lock_bh();
3009 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt; 3052 for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
3010 rt = rcu_dereference(rt->u.dst.rt_next), idx++) { 3053 rt = rcu_dereference_bh(rt->u.dst.rt_next), idx++) {
3011 if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx) 3054 if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
3012 continue; 3055 continue;
3013 if (rt_is_expired(rt)) 3056 if (rt_is_expired(rt))
@@ -3070,22 +3113,20 @@ static void rt_secret_reschedule(int old)
3070 rtnl_lock(); 3113 rtnl_lock();
3071 for_each_net(net) { 3114 for_each_net(net) {
3072 int deleted = del_timer_sync(&net->ipv4.rt_secret_timer); 3115 int deleted = del_timer_sync(&net->ipv4.rt_secret_timer);
3116 long time;
3073 3117
3074 if (!new) 3118 if (!new)
3075 continue; 3119 continue;
3076 3120
3077 if (deleted) { 3121 if (deleted) {
3078 long time = net->ipv4.rt_secret_timer.expires - jiffies; 3122 time = net->ipv4.rt_secret_timer.expires - jiffies;
3079 3123
3080 if (time <= 0 || (time += diff) <= 0) 3124 if (time <= 0 || (time += diff) <= 0)
3081 time = 0; 3125 time = 0;
3082
3083 net->ipv4.rt_secret_timer.expires = time;
3084 } else 3126 } else
3085 net->ipv4.rt_secret_timer.expires = new; 3127 time = new;
3086 3128
3087 net->ipv4.rt_secret_timer.expires += jiffies; 3129 mod_timer(&net->ipv4.rt_secret_timer, jiffies + time);
3088 add_timer(&net->ipv4.rt_secret_timer);
3089 } 3130 }
3090 rtnl_unlock(); 3131 rtnl_unlock();
3091} 3132}
@@ -3327,7 +3368,7 @@ static __net_initdata struct pernet_operations rt_secret_timer_ops = {
3327 3368
3328 3369
3329#ifdef CONFIG_NET_CLS_ROUTE 3370#ifdef CONFIG_NET_CLS_ROUTE
3330struct ip_rt_acct *ip_rt_acct __read_mostly; 3371struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3331#endif /* CONFIG_NET_CLS_ROUTE */ 3372#endif /* CONFIG_NET_CLS_ROUTE */
3332 3373
3333static __initdata unsigned long rhash_entries; 3374static __initdata unsigned long rhash_entries;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 26399ad2a289..5c24db4a3c91 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -277,6 +277,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
277 277
278 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); 278 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
279 279
280 /* check for timestamp cookie support */
281 memset(&tcp_opt, 0, sizeof(tcp_opt));
282 tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
283
284 if (tcp_opt.saw_tstamp)
285 cookie_check_timestamp(&tcp_opt);
286
280 ret = NULL; 287 ret = NULL;
281 req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */ 288 req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
282 if (!req) 289 if (!req)
@@ -292,6 +299,12 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
292 ireq->loc_addr = ip_hdr(skb)->daddr; 299 ireq->loc_addr = ip_hdr(skb)->daddr;
293 ireq->rmt_addr = ip_hdr(skb)->saddr; 300 ireq->rmt_addr = ip_hdr(skb)->saddr;
294 ireq->ecn_ok = 0; 301 ireq->ecn_ok = 0;
302 ireq->snd_wscale = tcp_opt.snd_wscale;
303 ireq->rcv_wscale = tcp_opt.rcv_wscale;
304 ireq->sack_ok = tcp_opt.sack_ok;
305 ireq->wscale_ok = tcp_opt.wscale_ok;
306 ireq->tstamp_ok = tcp_opt.saw_tstamp;
307 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
295 308
296 /* We throwed the options of the initial SYN away, so we hope 309 /* We throwed the options of the initial SYN away, so we hope
297 * the ACK carries the same options again (see RFC1122 4.2.3.8) 310 * the ACK carries the same options again (see RFC1122 4.2.3.8)
@@ -340,26 +353,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
340 } 353 }
341 } 354 }
342 355
343 /* check for timestamp cookie support */
344 memset(&tcp_opt, 0, sizeof(tcp_opt));
345 tcp_parse_options(skb, &tcp_opt, &hash_location, 0, &rt->u.dst);
346
347 if (tcp_opt.saw_tstamp)
348 cookie_check_timestamp(&tcp_opt);
349
350 ireq->snd_wscale = tcp_opt.snd_wscale;
351 ireq->rcv_wscale = tcp_opt.rcv_wscale;
352 ireq->sack_ok = tcp_opt.sack_ok;
353 ireq->wscale_ok = tcp_opt.wscale_ok;
354 ireq->tstamp_ok = tcp_opt.saw_tstamp;
355 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
356
357 /* Try to redo what tcp_v4_send_synack did. */ 356 /* Try to redo what tcp_v4_send_synack did. */
358 req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW); 357 req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW);
359 358
360 tcp_select_initial_window(tcp_full_space(sk), req->mss, 359 tcp_select_initial_window(tcp_full_space(sk), req->mss,
361 &req->rcv_wnd, &req->window_clamp, 360 &req->rcv_wnd, &req->window_clamp,
362 ireq->wscale_ok, &rcv_wscale); 361 ireq->wscale_ok, &rcv_wscale,
362 dst_metric(&rt->u.dst, RTAX_INITRWND));
363 363
364 ireq->rcv_wscale = rcv_wscale; 364 ireq->rcv_wscale = rcv_wscale;
365 365
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 7e3712ce3994..c1bc074f61b7 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -576,6 +576,20 @@ static struct ctl_table ipv4_table[] = {
576 .proc_handler = proc_dointvec 576 .proc_handler = proc_dointvec
577 }, 577 },
578 { 578 {
579 .procname = "tcp_thin_linear_timeouts",
580 .data = &sysctl_tcp_thin_linear_timeouts,
581 .maxlen = sizeof(int),
582 .mode = 0644,
583 .proc_handler = proc_dointvec
584 },
585 {
586 .procname = "tcp_thin_dupack",
587 .data = &sysctl_tcp_thin_dupack,
588 .maxlen = sizeof(int),
589 .mode = 0644,
590 .proc_handler = proc_dointvec
591 },
592 {
579 .procname = "udp_mem", 593 .procname = "udp_mem",
580 .data = &sysctl_udp_mem, 594 .data = &sysctl_udp_mem,
581 .maxlen = sizeof(sysctl_udp_mem), 595 .maxlen = sizeof(sysctl_udp_mem),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b0a26bb25e2e..6afb6d8662b2 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -429,7 +429,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
429 if (tp->urg_seq == tp->copied_seq && 429 if (tp->urg_seq == tp->copied_seq &&
430 !sock_flag(sk, SOCK_URGINLINE) && 430 !sock_flag(sk, SOCK_URGINLINE) &&
431 tp->urg_data) 431 tp->urg_data)
432 target--; 432 target++;
433 433
434 /* Potential race condition. If read of tp below will 434 /* Potential race condition. If read of tp below will
435 * escape above sk->sk_state, we can be illegally awaken 435 * escape above sk->sk_state, we can be illegally awaken
@@ -536,8 +536,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
536 tp->nonagle &= ~TCP_NAGLE_PUSH; 536 tp->nonagle &= ~TCP_NAGLE_PUSH;
537} 537}
538 538
539static inline void tcp_mark_urg(struct tcp_sock *tp, int flags, 539static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
540 struct sk_buff *skb)
541{ 540{
542 if (flags & MSG_OOB) 541 if (flags & MSG_OOB)
543 tp->snd_up = tp->write_seq; 542 tp->snd_up = tp->write_seq;
@@ -546,13 +545,13 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
546static inline void tcp_push(struct sock *sk, int flags, int mss_now, 545static inline void tcp_push(struct sock *sk, int flags, int mss_now,
547 int nonagle) 546 int nonagle)
548{ 547{
549 struct tcp_sock *tp = tcp_sk(sk);
550
551 if (tcp_send_head(sk)) { 548 if (tcp_send_head(sk)) {
552 struct sk_buff *skb = tcp_write_queue_tail(sk); 549 struct tcp_sock *tp = tcp_sk(sk);
550
553 if (!(flags & MSG_MORE) || forced_push(tp)) 551 if (!(flags & MSG_MORE) || forced_push(tp))
554 tcp_mark_push(tp, skb); 552 tcp_mark_push(tp, tcp_write_queue_tail(sk));
555 tcp_mark_urg(tp, flags, skb); 553
554 tcp_mark_urg(tp, flags);
556 __tcp_push_pending_frames(sk, mss_now, 555 __tcp_push_pending_frames(sk, mss_now,
557 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle); 556 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
558 } 557 }
@@ -877,12 +876,12 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
877#define TCP_PAGE(sk) (sk->sk_sndmsg_page) 876#define TCP_PAGE(sk) (sk->sk_sndmsg_page)
878#define TCP_OFF(sk) (sk->sk_sndmsg_off) 877#define TCP_OFF(sk) (sk->sk_sndmsg_off)
879 878
880static inline int select_size(struct sock *sk) 879static inline int select_size(struct sock *sk, int sg)
881{ 880{
882 struct tcp_sock *tp = tcp_sk(sk); 881 struct tcp_sock *tp = tcp_sk(sk);
883 int tmp = tp->mss_cache; 882 int tmp = tp->mss_cache;
884 883
885 if (sk->sk_route_caps & NETIF_F_SG) { 884 if (sg) {
886 if (sk_can_gso(sk)) 885 if (sk_can_gso(sk))
887 tmp = 0; 886 tmp = 0;
888 else { 887 else {
@@ -906,7 +905,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
906 struct sk_buff *skb; 905 struct sk_buff *skb;
907 int iovlen, flags; 906 int iovlen, flags;
908 int mss_now, size_goal; 907 int mss_now, size_goal;
909 int err, copied; 908 int sg, err, copied;
910 long timeo; 909 long timeo;
911 910
912 lock_sock(sk); 911 lock_sock(sk);
@@ -934,6 +933,8 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
934 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 933 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
935 goto out_err; 934 goto out_err;
936 935
936 sg = sk->sk_route_caps & NETIF_F_SG;
937
937 while (--iovlen >= 0) { 938 while (--iovlen >= 0) {
938 int seglen = iov->iov_len; 939 int seglen = iov->iov_len;
939 unsigned char __user *from = iov->iov_base; 940 unsigned char __user *from = iov->iov_base;
@@ -959,8 +960,9 @@ new_segment:
959 if (!sk_stream_memory_free(sk)) 960 if (!sk_stream_memory_free(sk))
960 goto wait_for_sndbuf; 961 goto wait_for_sndbuf;
961 962
962 skb = sk_stream_alloc_skb(sk, select_size(sk), 963 skb = sk_stream_alloc_skb(sk,
963 sk->sk_allocation); 964 select_size(sk, sg),
965 sk->sk_allocation);
964 if (!skb) 966 if (!skb)
965 goto wait_for_memory; 967 goto wait_for_memory;
966 968
@@ -997,9 +999,7 @@ new_segment:
997 /* We can extend the last page 999 /* We can extend the last page
998 * fragment. */ 1000 * fragment. */
999 merge = 1; 1001 merge = 1;
1000 } else if (i == MAX_SKB_FRAGS || 1002 } else if (i == MAX_SKB_FRAGS || !sg) {
1001 (!i &&
1002 !(sk->sk_route_caps & NETIF_F_SG))) {
1003 /* Need to add new fragment and cannot 1003 /* Need to add new fragment and cannot
1004 * do this because interface is non-SG, 1004 * do this because interface is non-SG,
1005 * or because all the page slots are 1005 * or because all the page slots are
@@ -1254,6 +1254,39 @@ static void tcp_prequeue_process(struct sock *sk)
1254 tp->ucopy.memory = 0; 1254 tp->ucopy.memory = 0;
1255} 1255}
1256 1256
1257#ifdef CONFIG_NET_DMA
1258static void tcp_service_net_dma(struct sock *sk, bool wait)
1259{
1260 dma_cookie_t done, used;
1261 dma_cookie_t last_issued;
1262 struct tcp_sock *tp = tcp_sk(sk);
1263
1264 if (!tp->ucopy.dma_chan)
1265 return;
1266
1267 last_issued = tp->ucopy.dma_cookie;
1268 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1269
1270 do {
1271 if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1272 last_issued, &done,
1273 &used) == DMA_SUCCESS) {
1274 /* Safe to free early-copied skbs now */
1275 __skb_queue_purge(&sk->sk_async_wait_queue);
1276 break;
1277 } else {
1278 struct sk_buff *skb;
1279 while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1280 (dma_async_is_complete(skb->dma_cookie, done,
1281 used) == DMA_SUCCESS)) {
1282 __skb_dequeue(&sk->sk_async_wait_queue);
1283 kfree_skb(skb);
1284 }
1285 }
1286 } while (wait);
1287}
1288#endif
1289
1257static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) 1290static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1258{ 1291{
1259 struct sk_buff *skb; 1292 struct sk_buff *skb;
@@ -1546,6 +1579,10 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1546 /* __ Set realtime policy in scheduler __ */ 1579 /* __ Set realtime policy in scheduler __ */
1547 } 1580 }
1548 1581
1582#ifdef CONFIG_NET_DMA
1583 if (tp->ucopy.dma_chan)
1584 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1585#endif
1549 if (copied >= target) { 1586 if (copied >= target) {
1550 /* Do not sleep, just process backlog. */ 1587 /* Do not sleep, just process backlog. */
1551 release_sock(sk); 1588 release_sock(sk);
@@ -1554,6 +1591,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1554 sk_wait_data(sk, &timeo); 1591 sk_wait_data(sk, &timeo);
1555 1592
1556#ifdef CONFIG_NET_DMA 1593#ifdef CONFIG_NET_DMA
1594 tcp_service_net_dma(sk, false); /* Don't block */
1557 tp->ucopy.wakeup = 0; 1595 tp->ucopy.wakeup = 0;
1558#endif 1596#endif
1559 1597
@@ -1633,6 +1671,9 @@ do_prequeue:
1633 copied = -EFAULT; 1671 copied = -EFAULT;
1634 break; 1672 break;
1635 } 1673 }
1674
1675 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1676
1636 if ((offset + used) == skb->len) 1677 if ((offset + used) == skb->len)
1637 copied_early = 1; 1678 copied_early = 1;
1638 1679
@@ -1702,27 +1743,9 @@ skip_copy:
1702 } 1743 }
1703 1744
1704#ifdef CONFIG_NET_DMA 1745#ifdef CONFIG_NET_DMA
1705 if (tp->ucopy.dma_chan) { 1746 tcp_service_net_dma(sk, true); /* Wait for queue to drain */
1706 dma_cookie_t done, used; 1747 tp->ucopy.dma_chan = NULL;
1707
1708 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1709
1710 while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1711 tp->ucopy.dma_cookie, &done,
1712 &used) == DMA_IN_PROGRESS) {
1713 /* do partial cleanup of sk_async_wait_queue */
1714 while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1715 (dma_async_is_complete(skb->dma_cookie, done,
1716 used) == DMA_SUCCESS)) {
1717 __skb_dequeue(&sk->sk_async_wait_queue);
1718 kfree_skb(skb);
1719 }
1720 }
1721 1748
1722 /* Safe to free early-copied skbs now */
1723 __skb_queue_purge(&sk->sk_async_wait_queue);
1724 tp->ucopy.dma_chan = NULL;
1725 }
1726 if (tp->ucopy.pinned_list) { 1749 if (tp->ucopy.pinned_list) {
1727 dma_unpin_iovec_pages(tp->ucopy.pinned_list); 1750 dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1728 tp->ucopy.pinned_list = NULL; 1751 tp->ucopy.pinned_list = NULL;
@@ -2229,6 +2252,20 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2229 } 2252 }
2230 break; 2253 break;
2231 2254
2255 case TCP_THIN_LINEAR_TIMEOUTS:
2256 if (val < 0 || val > 1)
2257 err = -EINVAL;
2258 else
2259 tp->thin_lto = val;
2260 break;
2261
2262 case TCP_THIN_DUPACK:
2263 if (val < 0 || val > 1)
2264 err = -EINVAL;
2265 else
2266 tp->thin_dupack = val;
2267 break;
2268
2232 case TCP_CORK: 2269 case TCP_CORK:
2233 /* When set indicates to always queue non-full frames. 2270 /* When set indicates to always queue non-full frames.
2234 * Later the user clears this option and we transmit 2271 * Later the user clears this option and we transmit
@@ -2788,10 +2825,10 @@ EXPORT_SYMBOL(tcp_gro_complete);
2788 2825
2789#ifdef CONFIG_TCP_MD5SIG 2826#ifdef CONFIG_TCP_MD5SIG
2790static unsigned long tcp_md5sig_users; 2827static unsigned long tcp_md5sig_users;
2791static struct tcp_md5sig_pool **tcp_md5sig_pool; 2828static struct tcp_md5sig_pool * __percpu *tcp_md5sig_pool;
2792static DEFINE_SPINLOCK(tcp_md5sig_pool_lock); 2829static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2793 2830
2794static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool) 2831static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
2795{ 2832{
2796 int cpu; 2833 int cpu;
2797 for_each_possible_cpu(cpu) { 2834 for_each_possible_cpu(cpu) {
@@ -2808,7 +2845,7 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
2808 2845
2809void tcp_free_md5sig_pool(void) 2846void tcp_free_md5sig_pool(void)
2810{ 2847{
2811 struct tcp_md5sig_pool **pool = NULL; 2848 struct tcp_md5sig_pool * __percpu *pool = NULL;
2812 2849
2813 spin_lock_bh(&tcp_md5sig_pool_lock); 2850 spin_lock_bh(&tcp_md5sig_pool_lock);
2814 if (--tcp_md5sig_users == 0) { 2851 if (--tcp_md5sig_users == 0) {
@@ -2822,10 +2859,11 @@ void tcp_free_md5sig_pool(void)
2822 2859
2823EXPORT_SYMBOL(tcp_free_md5sig_pool); 2860EXPORT_SYMBOL(tcp_free_md5sig_pool);
2824 2861
2825static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(struct sock *sk) 2862static struct tcp_md5sig_pool * __percpu *
2863__tcp_alloc_md5sig_pool(struct sock *sk)
2826{ 2864{
2827 int cpu; 2865 int cpu;
2828 struct tcp_md5sig_pool **pool; 2866 struct tcp_md5sig_pool * __percpu *pool;
2829 2867
2830 pool = alloc_percpu(struct tcp_md5sig_pool *); 2868 pool = alloc_percpu(struct tcp_md5sig_pool *);
2831 if (!pool) 2869 if (!pool)
@@ -2852,9 +2890,9 @@ out_free:
2852 return NULL; 2890 return NULL;
2853} 2891}
2854 2892
2855struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *sk) 2893struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
2856{ 2894{
2857 struct tcp_md5sig_pool **pool; 2895 struct tcp_md5sig_pool * __percpu *pool;
2858 int alloc = 0; 2896 int alloc = 0;
2859 2897
2860retry: 2898retry:
@@ -2873,7 +2911,9 @@ retry:
2873 2911
2874 if (alloc) { 2912 if (alloc) {
2875 /* we cannot hold spinlock here because this may sleep. */ 2913 /* we cannot hold spinlock here because this may sleep. */
2876 struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(sk); 2914 struct tcp_md5sig_pool * __percpu *p;
2915
2916 p = __tcp_alloc_md5sig_pool(sk);
2877 spin_lock_bh(&tcp_md5sig_pool_lock); 2917 spin_lock_bh(&tcp_md5sig_pool_lock);
2878 if (!p) { 2918 if (!p) {
2879 tcp_md5sig_users--; 2919 tcp_md5sig_users--;
@@ -2897,7 +2937,7 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2897 2937
2898struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu) 2938struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
2899{ 2939{
2900 struct tcp_md5sig_pool **p; 2940 struct tcp_md5sig_pool * __percpu *p;
2901 spin_lock_bh(&tcp_md5sig_pool_lock); 2941 spin_lock_bh(&tcp_md5sig_pool_lock);
2902 p = tcp_md5sig_pool; 2942 p = tcp_md5sig_pool;
2903 if (p) 2943 if (p)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 12cab7d74dba..c096a4218b8f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -89,6 +89,8 @@ int sysctl_tcp_frto __read_mostly = 2;
89int sysctl_tcp_frto_response __read_mostly; 89int sysctl_tcp_frto_response __read_mostly;
90int sysctl_tcp_nometrics_save __read_mostly; 90int sysctl_tcp_nometrics_save __read_mostly;
91 91
92int sysctl_tcp_thin_dupack __read_mostly;
93
92int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; 94int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
93int sysctl_tcp_abc __read_mostly; 95int sysctl_tcp_abc __read_mostly;
94 96
@@ -2447,6 +2449,16 @@ static int tcp_time_to_recover(struct sock *sk)
2447 return 1; 2449 return 1;
2448 } 2450 }
2449 2451
2452 /* If a thin stream is detected, retransmit after first
2453 * received dupack. Employ only if SACK is supported in order
2454 * to avoid possible corner-case series of spurious retransmissions
2455 * Use only if there are no unsent data.
2456 */
2457 if ((tp->thin_dupack || sysctl_tcp_thin_dupack) &&
2458 tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 &&
2459 tcp_is_sack(tp) && !tcp_send_head(sk))
2460 return 1;
2461
2450 return 0; 2462 return 0;
2451} 2463}
2452 2464
@@ -2499,6 +2511,9 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
2499 int err; 2511 int err;
2500 unsigned int mss; 2512 unsigned int mss;
2501 2513
2514 if (packets == 0)
2515 return;
2516
2502 WARN_ON(packets > tp->packets_out); 2517 WARN_ON(packets > tp->packets_out);
2503 if (tp->lost_skb_hint) { 2518 if (tp->lost_skb_hint) {
2504 skb = tp->lost_skb_hint; 2519 skb = tp->lost_skb_hint;
@@ -3727,7 +3742,7 @@ old_ack:
3727 * the fast version below fails. 3742 * the fast version below fails.
3728 */ 3743 */
3729void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, 3744void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
3730 u8 **hvpp, int estab, struct dst_entry *dst) 3745 u8 **hvpp, int estab)
3731{ 3746{
3732 unsigned char *ptr; 3747 unsigned char *ptr;
3733 struct tcphdr *th = tcp_hdr(skb); 3748 struct tcphdr *th = tcp_hdr(skb);
@@ -3766,8 +3781,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
3766 break; 3781 break;
3767 case TCPOPT_WINDOW: 3782 case TCPOPT_WINDOW:
3768 if (opsize == TCPOLEN_WINDOW && th->syn && 3783 if (opsize == TCPOLEN_WINDOW && th->syn &&
3769 !estab && sysctl_tcp_window_scaling && 3784 !estab && sysctl_tcp_window_scaling) {
3770 !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)) {
3771 __u8 snd_wscale = *(__u8 *)ptr; 3785 __u8 snd_wscale = *(__u8 *)ptr;
3772 opt_rx->wscale_ok = 1; 3786 opt_rx->wscale_ok = 1;
3773 if (snd_wscale > 14) { 3787 if (snd_wscale > 14) {
@@ -3783,8 +3797,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
3783 case TCPOPT_TIMESTAMP: 3797 case TCPOPT_TIMESTAMP:
3784 if ((opsize == TCPOLEN_TIMESTAMP) && 3798 if ((opsize == TCPOLEN_TIMESTAMP) &&
3785 ((estab && opt_rx->tstamp_ok) || 3799 ((estab && opt_rx->tstamp_ok) ||
3786 (!estab && sysctl_tcp_timestamps && 3800 (!estab && sysctl_tcp_timestamps))) {
3787 !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP)))) {
3788 opt_rx->saw_tstamp = 1; 3801 opt_rx->saw_tstamp = 1;
3789 opt_rx->rcv_tsval = get_unaligned_be32(ptr); 3802 opt_rx->rcv_tsval = get_unaligned_be32(ptr);
3790 opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); 3803 opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
@@ -3792,8 +3805,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
3792 break; 3805 break;
3793 case TCPOPT_SACK_PERM: 3806 case TCPOPT_SACK_PERM:
3794 if (opsize == TCPOLEN_SACK_PERM && th->syn && 3807 if (opsize == TCPOLEN_SACK_PERM && th->syn &&
3795 !estab && sysctl_tcp_sack && 3808 !estab && sysctl_tcp_sack) {
3796 !dst_feature(dst, RTAX_FEATURE_NO_SACK)) {
3797 opt_rx->sack_ok = 1; 3809 opt_rx->sack_ok = 1;
3798 tcp_sack_reset(opt_rx); 3810 tcp_sack_reset(opt_rx);
3799 } 3811 }
@@ -3878,7 +3890,7 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
3878 if (tcp_parse_aligned_timestamp(tp, th)) 3890 if (tcp_parse_aligned_timestamp(tp, th))
3879 return 1; 3891 return 1;
3880 } 3892 }
3881 tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL); 3893 tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
3882 return 1; 3894 return 1;
3883} 3895}
3884 3896
@@ -4133,10 +4145,8 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
4133static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) 4145static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
4134{ 4146{
4135 struct tcp_sock *tp = tcp_sk(sk); 4147 struct tcp_sock *tp = tcp_sk(sk);
4136 struct dst_entry *dst = __sk_dst_get(sk);
4137 4148
4138 if (tcp_is_sack(tp) && sysctl_tcp_dsack && 4149 if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
4139 !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
4140 int mib_idx; 4150 int mib_idx;
4141 4151
4142 if (before(seq, tp->rcv_nxt)) 4152 if (before(seq, tp->rcv_nxt))
@@ -4165,15 +4175,13 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
4165static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) 4175static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
4166{ 4176{
4167 struct tcp_sock *tp = tcp_sk(sk); 4177 struct tcp_sock *tp = tcp_sk(sk);
4168 struct dst_entry *dst = __sk_dst_get(sk);
4169 4178
4170 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 4179 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
4171 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 4180 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4172 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); 4181 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
4173 tcp_enter_quickack_mode(sk); 4182 tcp_enter_quickack_mode(sk);
4174 4183
4175 if (tcp_is_sack(tp) && sysctl_tcp_dsack && 4184 if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
4176 !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
4177 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 4185 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
4178 4186
4179 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) 4187 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
@@ -5428,11 +5436,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5428 u8 *hash_location; 5436 u8 *hash_location;
5429 struct inet_connection_sock *icsk = inet_csk(sk); 5437 struct inet_connection_sock *icsk = inet_csk(sk);
5430 struct tcp_sock *tp = tcp_sk(sk); 5438 struct tcp_sock *tp = tcp_sk(sk);
5431 struct dst_entry *dst = __sk_dst_get(sk);
5432 struct tcp_cookie_values *cvp = tp->cookie_values; 5439 struct tcp_cookie_values *cvp = tp->cookie_values;
5433 int saved_clamp = tp->rx_opt.mss_clamp; 5440 int saved_clamp = tp->rx_opt.mss_clamp;
5434 5441
5435 tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, dst); 5442 tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);
5436 5443
5437 if (th->ack) { 5444 if (th->ack) {
5438 /* rfc793: 5445 /* rfc793:
@@ -5791,11 +5798,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5791 5798
5792 /* tcp_ack considers this ACK as duplicate 5799 /* tcp_ack considers this ACK as duplicate
5793 * and does not calculate rtt. 5800 * and does not calculate rtt.
5794 * Fix it at least with timestamps. 5801 * Force it here.
5795 */ 5802 */
5796 if (tp->rx_opt.saw_tstamp && 5803 tcp_ack_update_rtt(sk, 0, 0);
5797 tp->rx_opt.rcv_tsecr && !tp->srtt)
5798 tcp_ack_saw_tstamp(sk, 0);
5799 5804
5800 if (tp->rx_opt.tstamp_ok) 5805 if (tp->rx_opt.tstamp_ok)
5801 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; 5806 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 15e96030ce47..f4df5f931f36 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -370,6 +370,11 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
370 if (sk->sk_state == TCP_CLOSE) 370 if (sk->sk_state == TCP_CLOSE)
371 goto out; 371 goto out;
372 372
373 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
374 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
375 goto out;
376 }
377
373 icsk = inet_csk(sk); 378 icsk = inet_csk(sk);
374 tp = tcp_sk(sk); 379 tp = tcp_sk(sk);
375 seq = ntohl(th->seq); 380 seq = ntohl(th->seq);
@@ -742,9 +747,9 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
742 * This still operates on a request_sock only, not on a big 747 * This still operates on a request_sock only, not on a big
743 * socket. 748 * socket.
744 */ 749 */
745static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, 750static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
746 struct request_sock *req, 751 struct request_sock *req,
747 struct request_values *rvp) 752 struct request_values *rvp)
748{ 753{
749 const struct inet_request_sock *ireq = inet_rsk(req); 754 const struct inet_request_sock *ireq = inet_rsk(req);
750 int err = -1; 755 int err = -1;
@@ -775,10 +780,11 @@ static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
775 return err; 780 return err;
776} 781}
777 782
778static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req, 783static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
779 struct request_values *rvp) 784 struct request_values *rvp)
780{ 785{
781 return __tcp_v4_send_synack(sk, NULL, req, rvp); 786 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
787 return tcp_v4_send_synack(sk, NULL, req, rvp);
782} 788}
783 789
784/* 790/*
@@ -1192,10 +1198,11 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
1192struct request_sock_ops tcp_request_sock_ops __read_mostly = { 1198struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1193 .family = PF_INET, 1199 .family = PF_INET,
1194 .obj_size = sizeof(struct tcp_request_sock), 1200 .obj_size = sizeof(struct tcp_request_sock),
1195 .rtx_syn_ack = tcp_v4_send_synack, 1201 .rtx_syn_ack = tcp_v4_rtx_synack,
1196 .send_ack = tcp_v4_reqsk_send_ack, 1202 .send_ack = tcp_v4_reqsk_send_ack,
1197 .destructor = tcp_v4_reqsk_destructor, 1203 .destructor = tcp_v4_reqsk_destructor,
1198 .send_reset = tcp_v4_send_reset, 1204 .send_reset = tcp_v4_send_reset,
1205 .syn_ack_timeout = tcp_syn_ack_timeout,
1199}; 1206};
1200 1207
1201#ifdef CONFIG_TCP_MD5SIG 1208#ifdef CONFIG_TCP_MD5SIG
@@ -1262,20 +1269,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1262 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops; 1269 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1263#endif 1270#endif
1264 1271
1265 ireq = inet_rsk(req);
1266 ireq->loc_addr = daddr;
1267 ireq->rmt_addr = saddr;
1268 ireq->no_srccheck = inet_sk(sk)->transparent;
1269 ireq->opt = tcp_v4_save_options(sk, skb);
1270
1271 dst = inet_csk_route_req(sk, req);
1272 if(!dst)
1273 goto drop_and_free;
1274
1275 tcp_clear_options(&tmp_opt); 1272 tcp_clear_options(&tmp_opt);
1276 tmp_opt.mss_clamp = TCP_MSS_DEFAULT; 1273 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1277 tmp_opt.user_mss = tp->rx_opt.user_mss; 1274 tmp_opt.user_mss = tp->rx_opt.user_mss;
1278 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst); 1275 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1279 1276
1280 if (tmp_opt.cookie_plus > 0 && 1277 if (tmp_opt.cookie_plus > 0 &&
1281 tmp_opt.saw_tstamp && 1278 tmp_opt.saw_tstamp &&
@@ -1319,8 +1316,14 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1319 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; 1316 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1320 tcp_openreq_init(req, &tmp_opt, skb); 1317 tcp_openreq_init(req, &tmp_opt, skb);
1321 1318
1319 ireq = inet_rsk(req);
1320 ireq->loc_addr = daddr;
1321 ireq->rmt_addr = saddr;
1322 ireq->no_srccheck = inet_sk(sk)->transparent;
1323 ireq->opt = tcp_v4_save_options(sk, skb);
1324
1322 if (security_inet_conn_request(sk, skb, req)) 1325 if (security_inet_conn_request(sk, skb, req))
1323 goto drop_and_release; 1326 goto drop_and_free;
1324 1327
1325 if (!want_cookie) 1328 if (!want_cookie)
1326 TCP_ECN_create_request(req, tcp_hdr(skb)); 1329 TCP_ECN_create_request(req, tcp_hdr(skb));
@@ -1345,6 +1348,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1345 */ 1348 */
1346 if (tmp_opt.saw_tstamp && 1349 if (tmp_opt.saw_tstamp &&
1347 tcp_death_row.sysctl_tw_recycle && 1350 tcp_death_row.sysctl_tw_recycle &&
1351 (dst = inet_csk_route_req(sk, req)) != NULL &&
1348 (peer = rt_get_peer((struct rtable *)dst)) != NULL && 1352 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1349 peer->v4daddr == saddr) { 1353 peer->v4daddr == saddr) {
1350 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL && 1354 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
@@ -1376,8 +1380,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1376 } 1380 }
1377 tcp_rsk(req)->snt_isn = isn; 1381 tcp_rsk(req)->snt_isn = isn;
1378 1382
1379 if (__tcp_v4_send_synack(sk, dst, req, 1383 if (tcp_v4_send_synack(sk, dst, req,
1380 (struct request_values *)&tmp_ext) || 1384 (struct request_values *)&tmp_ext) ||
1381 want_cookie) 1385 want_cookie)
1382 goto drop_and_free; 1386 goto drop_and_free;
1383 1387
@@ -1656,6 +1660,11 @@ process:
1656 if (sk->sk_state == TCP_TIME_WAIT) 1660 if (sk->sk_state == TCP_TIME_WAIT)
1657 goto do_time_wait; 1661 goto do_time_wait;
1658 1662
1663 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1664 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1665 goto discard_and_relse;
1666 }
1667
1659 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) 1668 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1660 goto discard_and_relse; 1669 goto discard_and_relse;
1661 nf_reset(skb); 1670 nf_reset(skb);
@@ -1680,8 +1689,11 @@ process:
1680 if (!tcp_prequeue(sk, skb)) 1689 if (!tcp_prequeue(sk, skb))
1681 ret = tcp_v4_do_rcv(sk, skb); 1690 ret = tcp_v4_do_rcv(sk, skb);
1682 } 1691 }
1683 } else 1692 } else if (unlikely(sk_add_backlog(sk, skb))) {
1684 sk_add_backlog(sk, skb); 1693 bh_unlock_sock(sk);
1694 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1695 goto discard_and_relse;
1696 }
1685 bh_unlock_sock(sk); 1697 bh_unlock_sock(sk);
1686 1698
1687 sock_put(sk); 1699 sock_put(sk);
@@ -2428,12 +2440,12 @@ static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2428 }, 2440 },
2429}; 2441};
2430 2442
2431static int tcp4_proc_init_net(struct net *net) 2443static int __net_init tcp4_proc_init_net(struct net *net)
2432{ 2444{
2433 return tcp_proc_register(net, &tcp4_seq_afinfo); 2445 return tcp_proc_register(net, &tcp4_seq_afinfo);
2434} 2446}
2435 2447
2436static void tcp4_proc_exit_net(struct net *net) 2448static void __net_exit tcp4_proc_exit_net(struct net *net)
2437{ 2449{
2438 tcp_proc_unregister(net, &tcp4_seq_afinfo); 2450 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2439} 2451}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 87accec8d097..4199bc6915c5 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -95,9 +95,9 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
95 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); 95 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
96 int paws_reject = 0; 96 int paws_reject = 0;
97 97
98 tmp_opt.saw_tstamp = 0;
98 if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) { 99 if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
99 tmp_opt.tstamp_ok = 1; 100 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
100 tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL);
101 101
102 if (tmp_opt.saw_tstamp) { 102 if (tmp_opt.saw_tstamp) {
103 tmp_opt.ts_recent = tcptw->tw_ts_recent; 103 tmp_opt.ts_recent = tcptw->tw_ts_recent;
@@ -526,9 +526,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
526 __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); 526 __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
527 int paws_reject = 0; 527 int paws_reject = 0;
528 528
529 if ((th->doff > (sizeof(*th) >> 2)) && (req->ts_recent)) { 529 tmp_opt.saw_tstamp = 0;
530 tmp_opt.tstamp_ok = 1; 530 if (th->doff > (sizeof(struct tcphdr)>>2)) {
531 tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL); 531 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
532 532
533 if (tmp_opt.saw_tstamp) { 533 if (tmp_opt.saw_tstamp) {
534 tmp_opt.ts_recent = req->ts_recent; 534 tmp_opt.ts_recent = req->ts_recent;
@@ -728,7 +728,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
728 * in main socket hash table and lock on listening 728 * in main socket hash table and lock on listening
729 * socket does not protect us more. 729 * socket does not protect us more.
730 */ 730 */
731 sk_add_backlog(child, skb); 731 __sk_add_backlog(child, skb);
732 } 732 }
733 733
734 bh_unlock_sock(child); 734 bh_unlock_sock(child);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 93316a96d820..f181b78f2385 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -183,7 +183,8 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
183 */ 183 */
184void tcp_select_initial_window(int __space, __u32 mss, 184void tcp_select_initial_window(int __space, __u32 mss,
185 __u32 *rcv_wnd, __u32 *window_clamp, 185 __u32 *rcv_wnd, __u32 *window_clamp,
186 int wscale_ok, __u8 *rcv_wscale) 186 int wscale_ok, __u8 *rcv_wscale,
187 __u32 init_rcv_wnd)
187{ 188{
188 unsigned int space = (__space < 0 ? 0 : __space); 189 unsigned int space = (__space < 0 ? 0 : __space);
189 190
@@ -232,7 +233,13 @@ void tcp_select_initial_window(int __space, __u32 mss,
232 init_cwnd = 2; 233 init_cwnd = 2;
233 else if (mss > 1460) 234 else if (mss > 1460)
234 init_cwnd = 3; 235 init_cwnd = 3;
235 if (*rcv_wnd > init_cwnd * mss) 236 /* when initializing use the value from init_rcv_wnd
237 * rather than the default from above
238 */
239 if (init_rcv_wnd &&
240 (*rcv_wnd > init_rcv_wnd * mss))
241 *rcv_wnd = init_rcv_wnd * mss;
242 else if (*rcv_wnd > init_cwnd * mss)
236 *rcv_wnd = init_cwnd * mss; 243 *rcv_wnd = init_cwnd * mss;
237 } 244 }
238 245
@@ -553,7 +560,6 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
553 struct tcp_md5sig_key **md5) { 560 struct tcp_md5sig_key **md5) {
554 struct tcp_sock *tp = tcp_sk(sk); 561 struct tcp_sock *tp = tcp_sk(sk);
555 struct tcp_cookie_values *cvp = tp->cookie_values; 562 struct tcp_cookie_values *cvp = tp->cookie_values;
556 struct dst_entry *dst = __sk_dst_get(sk);
557 unsigned remaining = MAX_TCP_OPTION_SPACE; 563 unsigned remaining = MAX_TCP_OPTION_SPACE;
558 u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ? 564 u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
559 tcp_cookie_size_check(cvp->cookie_desired) : 565 tcp_cookie_size_check(cvp->cookie_desired) :
@@ -581,22 +587,18 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
581 opts->mss = tcp_advertise_mss(sk); 587 opts->mss = tcp_advertise_mss(sk);
582 remaining -= TCPOLEN_MSS_ALIGNED; 588 remaining -= TCPOLEN_MSS_ALIGNED;
583 589
584 if (likely(sysctl_tcp_timestamps && 590 if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
585 !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) &&
586 *md5 == NULL)) {
587 opts->options |= OPTION_TS; 591 opts->options |= OPTION_TS;
588 opts->tsval = TCP_SKB_CB(skb)->when; 592 opts->tsval = TCP_SKB_CB(skb)->when;
589 opts->tsecr = tp->rx_opt.ts_recent; 593 opts->tsecr = tp->rx_opt.ts_recent;
590 remaining -= TCPOLEN_TSTAMP_ALIGNED; 594 remaining -= TCPOLEN_TSTAMP_ALIGNED;
591 } 595 }
592 if (likely(sysctl_tcp_window_scaling && 596 if (likely(sysctl_tcp_window_scaling)) {
593 !dst_feature(dst, RTAX_FEATURE_NO_WSCALE))) {
594 opts->ws = tp->rx_opt.rcv_wscale; 597 opts->ws = tp->rx_opt.rcv_wscale;
595 opts->options |= OPTION_WSCALE; 598 opts->options |= OPTION_WSCALE;
596 remaining -= TCPOLEN_WSCALE_ALIGNED; 599 remaining -= TCPOLEN_WSCALE_ALIGNED;
597 } 600 }
598 if (likely(sysctl_tcp_sack && 601 if (likely(sysctl_tcp_sack)) {
599 !dst_feature(dst, RTAX_FEATURE_NO_SACK))) {
600 opts->options |= OPTION_SACK_ADVERTISE; 602 opts->options |= OPTION_SACK_ADVERTISE;
601 if (unlikely(!(OPTION_TS & opts->options))) 603 if (unlikely(!(OPTION_TS & opts->options)))
602 remaining -= TCPOLEN_SACKPERM_ALIGNED; 604 remaining -= TCPOLEN_SACKPERM_ALIGNED;
@@ -1799,11 +1801,6 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1799void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, 1801void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
1800 int nonagle) 1802 int nonagle)
1801{ 1803{
1802 struct sk_buff *skb = tcp_send_head(sk);
1803
1804 if (!skb)
1805 return;
1806
1807 /* If we are closed, the bytes will have to remain here. 1804 /* If we are closed, the bytes will have to remain here.
1808 * In time closedown will finish, we empty the write queue and 1805 * In time closedown will finish, we empty the write queue and
1809 * all will be happy. 1806 * all will be happy.
@@ -2398,13 +2395,17 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2398 struct tcp_extend_values *xvp = tcp_xv(rvp); 2395 struct tcp_extend_values *xvp = tcp_xv(rvp);
2399 struct inet_request_sock *ireq = inet_rsk(req); 2396 struct inet_request_sock *ireq = inet_rsk(req);
2400 struct tcp_sock *tp = tcp_sk(sk); 2397 struct tcp_sock *tp = tcp_sk(sk);
2398 const struct tcp_cookie_values *cvp = tp->cookie_values;
2401 struct tcphdr *th; 2399 struct tcphdr *th;
2402 struct sk_buff *skb; 2400 struct sk_buff *skb;
2403 struct tcp_md5sig_key *md5; 2401 struct tcp_md5sig_key *md5;
2404 int tcp_header_size; 2402 int tcp_header_size;
2405 int mss; 2403 int mss;
2404 int s_data_desired = 0;
2406 2405
2407 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); 2406 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
2407 s_data_desired = cvp->s_data_desired;
2408 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
2408 if (skb == NULL) 2409 if (skb == NULL)
2409 return NULL; 2410 return NULL;
2410 2411
@@ -2427,7 +2428,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2427 &req->rcv_wnd, 2428 &req->rcv_wnd,
2428 &req->window_clamp, 2429 &req->window_clamp,
2429 ireq->wscale_ok, 2430 ireq->wscale_ok,
2430 &rcv_wscale); 2431 &rcv_wscale,
2432 dst_metric(dst, RTAX_INITRWND));
2431 ireq->rcv_wscale = rcv_wscale; 2433 ireq->rcv_wscale = rcv_wscale;
2432 } 2434 }
2433 2435
@@ -2459,16 +2461,12 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2459 TCPCB_FLAG_SYN | TCPCB_FLAG_ACK); 2461 TCPCB_FLAG_SYN | TCPCB_FLAG_ACK);
2460 2462
2461 if (OPTION_COOKIE_EXTENSION & opts.options) { 2463 if (OPTION_COOKIE_EXTENSION & opts.options) {
2462 const struct tcp_cookie_values *cvp = tp->cookie_values; 2464 if (s_data_desired) {
2463 2465 u8 *buf = skb_put(skb, s_data_desired);
2464 if (cvp != NULL &&
2465 cvp->s_data_constant &&
2466 cvp->s_data_desired > 0) {
2467 u8 *buf = skb_put(skb, cvp->s_data_desired);
2468 2466
2469 /* copy data directly from the listening socket. */ 2467 /* copy data directly from the listening socket. */
2470 memcpy(buf, cvp->s_data_payload, cvp->s_data_desired); 2468 memcpy(buf, cvp->s_data_payload, s_data_desired);
2471 TCP_SKB_CB(skb)->end_seq += cvp->s_data_desired; 2469 TCP_SKB_CB(skb)->end_seq += s_data_desired;
2472 } 2470 }
2473 2471
2474 if (opts.hash_size > 0) { 2472 if (opts.hash_size > 0) {
@@ -2527,9 +2525,7 @@ static void tcp_connect_init(struct sock *sk)
2527 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. 2525 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
2528 */ 2526 */
2529 tp->tcp_header_len = sizeof(struct tcphdr) + 2527 tp->tcp_header_len = sizeof(struct tcphdr) +
2530 (sysctl_tcp_timestamps && 2528 (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
2531 (!dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) ?
2532 TCPOLEN_TSTAMP_ALIGNED : 0));
2533 2529
2534#ifdef CONFIG_TCP_MD5SIG 2530#ifdef CONFIG_TCP_MD5SIG
2535 if (tp->af_specific->md5_lookup(sk, sk) != NULL) 2531 if (tp->af_specific->md5_lookup(sk, sk) != NULL)
@@ -2555,9 +2551,9 @@ static void tcp_connect_init(struct sock *sk)
2555 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), 2551 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
2556 &tp->rcv_wnd, 2552 &tp->rcv_wnd,
2557 &tp->window_clamp, 2553 &tp->window_clamp,
2558 (sysctl_tcp_window_scaling && 2554 sysctl_tcp_window_scaling,
2559 !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)), 2555 &rcv_wscale,
2560 &rcv_wscale); 2556 dst_metric(dst, RTAX_INITRWND));
2561 2557
2562 tp->rx_opt.rcv_wscale = rcv_wscale; 2558 tp->rx_opt.rcv_wscale = rcv_wscale;
2563 tp->rcv_ssthresh = tp->rcv_wnd; 2559 tp->rcv_ssthresh = tp->rcv_wnd;
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index bb110c5ce1d2..9bc805df95d2 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -39,9 +39,9 @@ static int port __read_mostly = 0;
39MODULE_PARM_DESC(port, "Port to match (0=all)"); 39MODULE_PARM_DESC(port, "Port to match (0=all)");
40module_param(port, int, 0); 40module_param(port, int, 0);
41 41
42static int bufsize __read_mostly = 4096; 42static unsigned int bufsize __read_mostly = 4096;
43MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)"); 43MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)");
44module_param(bufsize, int, 0); 44module_param(bufsize, uint, 0);
45 45
46static int full __read_mostly; 46static int full __read_mostly;
47MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)"); 47MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)");
@@ -75,12 +75,12 @@ static struct {
75 75
76static inline int tcp_probe_used(void) 76static inline int tcp_probe_used(void)
77{ 77{
78 return (tcp_probe.head - tcp_probe.tail) % bufsize; 78 return (tcp_probe.head - tcp_probe.tail) & (bufsize - 1);
79} 79}
80 80
81static inline int tcp_probe_avail(void) 81static inline int tcp_probe_avail(void)
82{ 82{
83 return bufsize - tcp_probe_used(); 83 return bufsize - tcp_probe_used() - 1;
84} 84}
85 85
86/* 86/*
@@ -116,7 +116,7 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
116 p->ssthresh = tcp_current_ssthresh(sk); 116 p->ssthresh = tcp_current_ssthresh(sk);
117 p->srtt = tp->srtt >> 3; 117 p->srtt = tp->srtt >> 3;
118 118
119 tcp_probe.head = (tcp_probe.head + 1) % bufsize; 119 tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
120 } 120 }
121 tcp_probe.lastcwnd = tp->snd_cwnd; 121 tcp_probe.lastcwnd = tp->snd_cwnd;
122 spin_unlock(&tcp_probe.lock); 122 spin_unlock(&tcp_probe.lock);
@@ -149,7 +149,7 @@ static int tcpprobe_open(struct inode * inode, struct file * file)
149static int tcpprobe_sprint(char *tbuf, int n) 149static int tcpprobe_sprint(char *tbuf, int n)
150{ 150{
151 const struct tcp_log *p 151 const struct tcp_log *p
152 = tcp_probe.log + tcp_probe.tail % bufsize; 152 = tcp_probe.log + tcp_probe.tail;
153 struct timespec tv 153 struct timespec tv
154 = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start)); 154 = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start));
155 155
@@ -192,7 +192,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
192 width = tcpprobe_sprint(tbuf, sizeof(tbuf)); 192 width = tcpprobe_sprint(tbuf, sizeof(tbuf));
193 193
194 if (cnt + width < len) 194 if (cnt + width < len)
195 tcp_probe.tail = (tcp_probe.tail + 1) % bufsize; 195 tcp_probe.tail = (tcp_probe.tail + 1) & (bufsize - 1);
196 196
197 spin_unlock_bh(&tcp_probe.lock); 197 spin_unlock_bh(&tcp_probe.lock);
198 198
@@ -222,9 +222,10 @@ static __init int tcpprobe_init(void)
222 init_waitqueue_head(&tcp_probe.wait); 222 init_waitqueue_head(&tcp_probe.wait);
223 spin_lock_init(&tcp_probe.lock); 223 spin_lock_init(&tcp_probe.lock);
224 224
225 if (bufsize < 0) 225 if (bufsize == 0)
226 return -EINVAL; 226 return -EINVAL;
227 227
228 bufsize = roundup_pow_of_two(bufsize);
228 tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL); 229 tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL);
229 if (!tcp_probe.log) 230 if (!tcp_probe.log)
230 goto err0; 231 goto err0;
@@ -236,7 +237,7 @@ static __init int tcpprobe_init(void)
236 if (ret) 237 if (ret)
237 goto err1; 238 goto err1;
238 239
239 pr_info("TCP probe registered (port=%d)\n", port); 240 pr_info("TCP probe registered (port=%d) bufsize=%u\n", port, bufsize);
240 return 0; 241 return 0;
241 err1: 242 err1:
242 proc_net_remove(&init_net, procname); 243 proc_net_remove(&init_net, procname);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 8816a20c2597..b2e6bbccaee1 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -29,6 +29,7 @@ int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL;
29int sysctl_tcp_retries1 __read_mostly = TCP_RETR1; 29int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
30int sysctl_tcp_retries2 __read_mostly = TCP_RETR2; 30int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
31int sysctl_tcp_orphan_retries __read_mostly; 31int sysctl_tcp_orphan_retries __read_mostly;
32int sysctl_tcp_thin_linear_timeouts __read_mostly;
32 33
33static void tcp_write_timer(unsigned long); 34static void tcp_write_timer(unsigned long);
34static void tcp_delack_timer(unsigned long); 35static void tcp_delack_timer(unsigned long);
@@ -133,7 +134,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
133} 134}
134 135
135/* This function calculates a "timeout" which is equivalent to the timeout of a 136/* This function calculates a "timeout" which is equivalent to the timeout of a
136 * TCP connection after "boundary" unsucessful, exponentially backed-off 137 * TCP connection after "boundary" unsuccessful, exponentially backed-off
137 * retransmissions with an initial RTO of TCP_RTO_MIN. 138 * retransmissions with an initial RTO of TCP_RTO_MIN.
138 */ 139 */
139static bool retransmits_timed_out(struct sock *sk, 140static bool retransmits_timed_out(struct sock *sk,
@@ -415,7 +416,25 @@ void tcp_retransmit_timer(struct sock *sk)
415 icsk->icsk_retransmits++; 416 icsk->icsk_retransmits++;
416 417
417out_reset_timer: 418out_reset_timer:
418 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); 419 /* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
420 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
421 * might be increased if the stream oscillates between thin and thick,
422 * thus the old value might already be too high compared to the value
423 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
424 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
425 * exponential backoff behaviour to avoid continue hammering
426 * linear-timeout retransmissions into a black hole
427 */
428 if (sk->sk_state == TCP_ESTABLISHED &&
429 (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
430 tcp_stream_is_thin(tp) &&
431 icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
432 icsk->icsk_backoff = 0;
433 icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
434 } else {
435 /* Use normal (exponential) backoff */
436 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
437 }
419 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); 438 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
420 if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1)) 439 if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1))
421 __sk_dst_reset(sk); 440 __sk_dst_reset(sk);
@@ -474,6 +493,12 @@ static void tcp_synack_timer(struct sock *sk)
474 TCP_TIMEOUT_INIT, TCP_RTO_MAX); 493 TCP_TIMEOUT_INIT, TCP_RTO_MAX);
475} 494}
476 495
496void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
497{
498 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
499}
500EXPORT_SYMBOL(tcp_syn_ack_timeout);
501
477void tcp_set_keepalive(struct sock *sk, int val) 502void tcp_set_keepalive(struct sock *sk, int val)
478{ 503{
479 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) 504 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1f9534846ca9..7af756d0f931 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -216,9 +216,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
216 * force rand to be an odd multiple of UDP_HTABLE_SIZE 216 * force rand to be an odd multiple of UDP_HTABLE_SIZE
217 */ 217 */
218 rand = (rand | 1) * (udptable->mask + 1); 218 rand = (rand | 1) * (udptable->mask + 1);
219 for (last = first + udptable->mask + 1; 219 last = first + udptable->mask + 1;
220 first != last; 220 do {
221 first++) {
222 hslot = udp_hashslot(udptable, net, first); 221 hslot = udp_hashslot(udptable, net, first);
223 bitmap_zero(bitmap, PORTS_PER_CHAIN); 222 bitmap_zero(bitmap, PORTS_PER_CHAIN);
224 spin_lock_bh(&hslot->lock); 223 spin_lock_bh(&hslot->lock);
@@ -238,7 +237,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
238 snum += rand; 237 snum += rand;
239 } while (snum != first); 238 } while (snum != first);
240 spin_unlock_bh(&hslot->lock); 239 spin_unlock_bh(&hslot->lock);
241 } 240 } while (++first != last);
242 goto fail; 241 goto fail;
243 } else { 242 } else {
244 hslot = udp_hashslot(udptable, net, snum); 243 hslot = udp_hashslot(udptable, net, snum);
@@ -1118,7 +1117,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1118 struct inet_sock *inet = inet_sk(sk); 1117 struct inet_sock *inet = inet_sk(sk);
1119 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; 1118 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
1120 struct sk_buff *skb; 1119 struct sk_buff *skb;
1121 unsigned int ulen, copied; 1120 unsigned int ulen;
1122 int peeked; 1121 int peeked;
1123 int err; 1122 int err;
1124 int is_udplite = IS_UDPLITE(sk); 1123 int is_udplite = IS_UDPLITE(sk);
@@ -1139,10 +1138,9 @@ try_again:
1139 goto out; 1138 goto out;
1140 1139
1141 ulen = skb->len - sizeof(struct udphdr); 1140 ulen = skb->len - sizeof(struct udphdr);
1142 copied = len; 1141 if (len > ulen)
1143 if (copied > ulen) 1142 len = ulen;
1144 copied = ulen; 1143 else if (len < ulen)
1145 else if (copied < ulen)
1146 msg->msg_flags |= MSG_TRUNC; 1144 msg->msg_flags |= MSG_TRUNC;
1147 1145
1148 /* 1146 /*
@@ -1151,14 +1149,14 @@ try_again:
1151 * coverage checksum (UDP-Lite), do it before the copy. 1149 * coverage checksum (UDP-Lite), do it before the copy.
1152 */ 1150 */
1153 1151
1154 if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { 1152 if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
1155 if (udp_lib_checksum_complete(skb)) 1153 if (udp_lib_checksum_complete(skb))
1156 goto csum_copy_err; 1154 goto csum_copy_err;
1157 } 1155 }
1158 1156
1159 if (skb_csum_unnecessary(skb)) 1157 if (skb_csum_unnecessary(skb))
1160 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), 1158 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
1161 msg->msg_iov, copied); 1159 msg->msg_iov, len);
1162 else { 1160 else {
1163 err = skb_copy_and_csum_datagram_iovec(skb, 1161 err = skb_copy_and_csum_datagram_iovec(skb,
1164 sizeof(struct udphdr), 1162 sizeof(struct udphdr),
@@ -1187,7 +1185,7 @@ try_again:
1187 if (inet->cmsg_flags) 1185 if (inet->cmsg_flags)
1188 ip_cmsg_recv(msg, skb); 1186 ip_cmsg_recv(msg, skb);
1189 1187
1190 err = copied; 1188 err = len;
1191 if (flags & MSG_TRUNC) 1189 if (flags & MSG_TRUNC)
1192 err = ulen; 1190 err = ulen;
1193 1191
@@ -1373,8 +1371,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1373 bh_lock_sock(sk); 1371 bh_lock_sock(sk);
1374 if (!sock_owned_by_user(sk)) 1372 if (!sock_owned_by_user(sk))
1375 rc = __udp_queue_rcv_skb(sk, skb); 1373 rc = __udp_queue_rcv_skb(sk, skb);
1376 else 1374 else if (sk_add_backlog(sk, skb)) {
1377 sk_add_backlog(sk, skb); 1375 bh_unlock_sock(sk);
1376 goto drop;
1377 }
1378 bh_unlock_sock(sk); 1378 bh_unlock_sock(sk);
1379 1379
1380 return rc; 1380 return rc;
@@ -2028,12 +2028,12 @@ static struct udp_seq_afinfo udp4_seq_afinfo = {
2028 }, 2028 },
2029}; 2029};
2030 2030
2031static int udp4_proc_init_net(struct net *net) 2031static int __net_init udp4_proc_init_net(struct net *net)
2032{ 2032{
2033 return udp_proc_register(net, &udp4_seq_afinfo); 2033 return udp_proc_register(net, &udp4_seq_afinfo);
2034} 2034}
2035 2035
2036static void udp4_proc_exit_net(struct net *net) 2036static void __net_exit udp4_proc_exit_net(struct net *net)
2037{ 2037{
2038 udp_proc_unregister(net, &udp4_seq_afinfo); 2038 udp_proc_unregister(net, &udp4_seq_afinfo);
2039} 2039}
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 66f79513f4a5..6610bf76369f 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -81,12 +81,12 @@ static struct udp_seq_afinfo udplite4_seq_afinfo = {
81 }, 81 },
82}; 82};
83 83
84static int udplite4_proc_init_net(struct net *net) 84static int __net_init udplite4_proc_init_net(struct net *net)
85{ 85{
86 return udp_proc_register(net, &udplite4_seq_afinfo); 86 return udp_proc_register(net, &udplite4_seq_afinfo);
87} 87}
88 88
89static void udplite4_proc_exit_net(struct net *net) 89static void __net_exit udplite4_proc_exit_net(struct net *net)
90{ 90{
91 udp_proc_unregister(net, &udplite4_seq_afinfo); 91 udp_proc_unregister(net, &udplite4_seq_afinfo);
92} 92}
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 8c08a28d8f83..e4a1483fba77 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -15,7 +15,6 @@
15#include <net/xfrm.h> 15#include <net/xfrm.h>
16#include <net/ip.h> 16#include <net/ip.h>
17 17
18static struct dst_ops xfrm4_dst_ops;
19static struct xfrm_policy_afinfo xfrm4_policy_afinfo; 18static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
20 19
21static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, 20static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
@@ -92,11 +91,12 @@ static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
92 return 0; 91 return 0;
93} 92}
94 93
95static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev) 94static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
95 struct flowi *fl)
96{ 96{
97 struct rtable *rt = (struct rtable *)xdst->route; 97 struct rtable *rt = (struct rtable *)xdst->route;
98 98
99 xdst->u.rt.fl = rt->fl; 99 xdst->u.rt.fl = *fl;
100 100
101 xdst->u.dst.dev = dev; 101 xdst->u.dst.dev = dev;
102 dev_hold(dev); 102 dev_hold(dev);
@@ -190,8 +190,10 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
190 190
191static inline int xfrm4_garbage_collect(struct dst_ops *ops) 191static inline int xfrm4_garbage_collect(struct dst_ops *ops)
192{ 192{
193 xfrm4_policy_afinfo.garbage_collect(&init_net); 193 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
194 return (atomic_read(&xfrm4_dst_ops.entries) > xfrm4_dst_ops.gc_thresh*2); 194
195 xfrm4_policy_afinfo.garbage_collect(net);
196 return (atomic_read(&ops->entries) > ops->gc_thresh * 2);
195} 197}
196 198
197static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu) 199static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -268,7 +270,7 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
268static struct ctl_table xfrm4_policy_table[] = { 270static struct ctl_table xfrm4_policy_table[] = {
269 { 271 {
270 .procname = "xfrm4_gc_thresh", 272 .procname = "xfrm4_gc_thresh",
271 .data = &xfrm4_dst_ops.gc_thresh, 273 .data = &init_net.xfrm.xfrm4_dst_ops.gc_thresh,
272 .maxlen = sizeof(int), 274 .maxlen = sizeof(int),
273 .mode = 0644, 275 .mode = 0644,
274 .proc_handler = proc_dointvec, 276 .proc_handler = proc_dointvec,
@@ -295,8 +297,6 @@ static void __exit xfrm4_policy_fini(void)
295 297
296void __init xfrm4_init(int rt_max_size) 298void __init xfrm4_init(int rt_max_size)
297{ 299{
298 xfrm4_state_init();
299 xfrm4_policy_init();
300 /* 300 /*
301 * Select a default value for the gc_thresh based on the main route 301 * Select a default value for the gc_thresh based on the main route
302 * table hash size. It seems to me the worst case scenario is when 302 * table hash size. It seems to me the worst case scenario is when
@@ -308,6 +308,9 @@ void __init xfrm4_init(int rt_max_size)
308 * and start cleaning when were 1/2 full 308 * and start cleaning when were 1/2 full
309 */ 309 */
310 xfrm4_dst_ops.gc_thresh = rt_max_size/2; 310 xfrm4_dst_ops.gc_thresh = rt_max_size/2;
311
312 xfrm4_state_init();
313 xfrm4_policy_init();
311#ifdef CONFIG_SYSCTL 314#ifdef CONFIG_SYSCTL
312 sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path, 315 sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path,
313 xfrm4_policy_table); 316 xfrm4_policy_table);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index de7a194a64ab..7e567ae5eaab 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -278,31 +278,31 @@ static void addrconf_mod_timer(struct inet6_ifaddr *ifp,
278 278
279static int snmp6_alloc_dev(struct inet6_dev *idev) 279static int snmp6_alloc_dev(struct inet6_dev *idev)
280{ 280{
281 if (snmp_mib_init((void **)idev->stats.ipv6, 281 if (snmp_mib_init((void __percpu **)idev->stats.ipv6,
282 sizeof(struct ipstats_mib)) < 0) 282 sizeof(struct ipstats_mib)) < 0)
283 goto err_ip; 283 goto err_ip;
284 if (snmp_mib_init((void **)idev->stats.icmpv6, 284 if (snmp_mib_init((void __percpu **)idev->stats.icmpv6,
285 sizeof(struct icmpv6_mib)) < 0) 285 sizeof(struct icmpv6_mib)) < 0)
286 goto err_icmp; 286 goto err_icmp;
287 if (snmp_mib_init((void **)idev->stats.icmpv6msg, 287 if (snmp_mib_init((void __percpu **)idev->stats.icmpv6msg,
288 sizeof(struct icmpv6msg_mib)) < 0) 288 sizeof(struct icmpv6msg_mib)) < 0)
289 goto err_icmpmsg; 289 goto err_icmpmsg;
290 290
291 return 0; 291 return 0;
292 292
293err_icmpmsg: 293err_icmpmsg:
294 snmp_mib_free((void **)idev->stats.icmpv6); 294 snmp_mib_free((void __percpu **)idev->stats.icmpv6);
295err_icmp: 295err_icmp:
296 snmp_mib_free((void **)idev->stats.ipv6); 296 snmp_mib_free((void __percpu **)idev->stats.ipv6);
297err_ip: 297err_ip:
298 return -ENOMEM; 298 return -ENOMEM;
299} 299}
300 300
301static void snmp6_free_dev(struct inet6_dev *idev) 301static void snmp6_free_dev(struct inet6_dev *idev)
302{ 302{
303 snmp_mib_free((void **)idev->stats.icmpv6msg); 303 snmp_mib_free((void __percpu **)idev->stats.icmpv6msg);
304 snmp_mib_free((void **)idev->stats.icmpv6); 304 snmp_mib_free((void __percpu **)idev->stats.icmpv6);
305 snmp_mib_free((void **)idev->stats.ipv6); 305 snmp_mib_free((void __percpu **)idev->stats.ipv6);
306} 306}
307 307
308/* Nobody refers to this device, we may destroy it. */ 308/* Nobody refers to this device, we may destroy it. */
@@ -502,8 +502,11 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
502 if (p == &net->ipv6.devconf_dflt->forwarding) 502 if (p == &net->ipv6.devconf_dflt->forwarding)
503 return 0; 503 return 0;
504 504
505 if (!rtnl_trylock()) 505 if (!rtnl_trylock()) {
506 /* Restore the original values before restarting */
507 *p = old;
506 return restart_syscall(); 508 return restart_syscall();
509 }
507 510
508 if (p == &net->ipv6.devconf_all->forwarding) { 511 if (p == &net->ipv6.devconf_all->forwarding) {
509 __s32 newf = net->ipv6.devconf_all->forwarding; 512 __s32 newf = net->ipv6.devconf_all->forwarding;
@@ -989,8 +992,7 @@ struct ipv6_saddr_dst {
989 992
990static inline int ipv6_saddr_preferred(int type) 993static inline int ipv6_saddr_preferred(int type)
991{ 994{
992 if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4| 995 if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK))
993 IPV6_ADDR_LOOPBACK|IPV6_ADDR_RESERVED))
994 return 1; 996 return 1;
995 return 0; 997 return 0;
996} 998}
@@ -1378,6 +1380,8 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
1378 if (dad_failed) 1380 if (dad_failed)
1379 ifp->flags |= IFA_F_DADFAILED; 1381 ifp->flags |= IFA_F_DADFAILED;
1380 spin_unlock_bh(&ifp->lock); 1382 spin_unlock_bh(&ifp->lock);
1383 if (dad_failed)
1384 ipv6_ifa_notify(0, ifp);
1381 in6_ifa_put(ifp); 1385 in6_ifa_put(ifp);
1382#ifdef CONFIG_IPV6_PRIVACY 1386#ifdef CONFIG_IPV6_PRIVACY
1383 } else if (ifp->flags&IFA_F_TEMPORARY) { 1387 } else if (ifp->flags&IFA_F_TEMPORARY) {
@@ -2613,7 +2617,7 @@ static void addrconf_bonding_change(struct net_device *dev, unsigned long event)
2613static int addrconf_ifdown(struct net_device *dev, int how) 2617static int addrconf_ifdown(struct net_device *dev, int how)
2614{ 2618{
2615 struct inet6_dev *idev; 2619 struct inet6_dev *idev;
2616 struct inet6_ifaddr *ifa, **bifa; 2620 struct inet6_ifaddr *ifa, *keep_list, **bifa;
2617 struct net *net = dev_net(dev); 2621 struct net *net = dev_net(dev);
2618 int i; 2622 int i;
2619 2623
@@ -2646,11 +2650,12 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2646 2650
2647 write_lock_bh(&addrconf_hash_lock); 2651 write_lock_bh(&addrconf_hash_lock);
2648 while ((ifa = *bifa) != NULL) { 2652 while ((ifa = *bifa) != NULL) {
2649 if (ifa->idev == idev) { 2653 if (ifa->idev == idev &&
2654 (how || !(ifa->flags&IFA_F_PERMANENT) ||
2655 ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
2650 *bifa = ifa->lst_next; 2656 *bifa = ifa->lst_next;
2651 ifa->lst_next = NULL; 2657 ifa->lst_next = NULL;
2652 addrconf_del_timer(ifa); 2658 __in6_ifa_put(ifa);
2653 in6_ifa_put(ifa);
2654 continue; 2659 continue;
2655 } 2660 }
2656 bifa = &ifa->lst_next; 2661 bifa = &ifa->lst_next;
@@ -2686,11 +2691,40 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2686 write_lock_bh(&idev->lock); 2691 write_lock_bh(&idev->lock);
2687 } 2692 }
2688#endif 2693#endif
2694 keep_list = NULL;
2695 bifa = &keep_list;
2689 while ((ifa = idev->addr_list) != NULL) { 2696 while ((ifa = idev->addr_list) != NULL) {
2690 idev->addr_list = ifa->if_next; 2697 idev->addr_list = ifa->if_next;
2691 ifa->if_next = NULL; 2698 ifa->if_next = NULL;
2692 ifa->dead = 1; 2699
2693 addrconf_del_timer(ifa); 2700 addrconf_del_timer(ifa);
2701
2702 /* If just doing link down, and address is permanent
2703 and not link-local, then retain it. */
2704 if (how == 0 &&
2705 (ifa->flags&IFA_F_PERMANENT) &&
2706 !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
2707
2708 /* Move to holding list */
2709 *bifa = ifa;
2710 bifa = &ifa->if_next;
2711
2712 /* If not doing DAD on this address, just keep it. */
2713 if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
2714 idev->cnf.accept_dad <= 0 ||
2715 (ifa->flags & IFA_F_NODAD))
2716 continue;
2717
2718 /* If it was tentative already, no need to notify */
2719 if (ifa->flags & IFA_F_TENTATIVE)
2720 continue;
2721
2722 /* Flag it for later restoration when link comes up */
2723 ifa->flags |= IFA_F_TENTATIVE;
2724 in6_ifa_hold(ifa);
2725 } else {
2726 ifa->dead = 1;
2727 }
2694 write_unlock_bh(&idev->lock); 2728 write_unlock_bh(&idev->lock);
2695 2729
2696 __ipv6_ifa_notify(RTM_DELADDR, ifa); 2730 __ipv6_ifa_notify(RTM_DELADDR, ifa);
@@ -2699,6 +2733,9 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2699 2733
2700 write_lock_bh(&idev->lock); 2734 write_lock_bh(&idev->lock);
2701 } 2735 }
2736
2737 idev->addr_list = keep_list;
2738
2702 write_unlock_bh(&idev->lock); 2739 write_unlock_bh(&idev->lock);
2703 2740
2704 /* Step 5: Discard multicast list */ 2741 /* Step 5: Discard multicast list */
@@ -2724,28 +2761,29 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2724static void addrconf_rs_timer(unsigned long data) 2761static void addrconf_rs_timer(unsigned long data)
2725{ 2762{
2726 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data; 2763 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data;
2764 struct inet6_dev *idev = ifp->idev;
2727 2765
2728 if (ifp->idev->cnf.forwarding) 2766 read_lock(&idev->lock);
2767 if (idev->dead || !(idev->if_flags & IF_READY))
2729 goto out; 2768 goto out;
2730 2769
2731 if (ifp->idev->if_flags & IF_RA_RCVD) { 2770 if (idev->cnf.forwarding)
2732 /* 2771 goto out;
2733 * Announcement received after solicitation 2772
2734 * was sent 2773 /* Announcement received after solicitation was sent */
2735 */ 2774 if (idev->if_flags & IF_RA_RCVD)
2736 goto out; 2775 goto out;
2737 }
2738 2776
2739 spin_lock(&ifp->lock); 2777 spin_lock(&ifp->lock);
2740 if (ifp->probes++ < ifp->idev->cnf.rtr_solicits) { 2778 if (ifp->probes++ < idev->cnf.rtr_solicits) {
2741 /* The wait after the last probe can be shorter */ 2779 /* The wait after the last probe can be shorter */
2742 addrconf_mod_timer(ifp, AC_RS, 2780 addrconf_mod_timer(ifp, AC_RS,
2743 (ifp->probes == ifp->idev->cnf.rtr_solicits) ? 2781 (ifp->probes == idev->cnf.rtr_solicits) ?
2744 ifp->idev->cnf.rtr_solicit_delay : 2782 idev->cnf.rtr_solicit_delay :
2745 ifp->idev->cnf.rtr_solicit_interval); 2783 idev->cnf.rtr_solicit_interval);
2746 spin_unlock(&ifp->lock); 2784 spin_unlock(&ifp->lock);
2747 2785
2748 ndisc_send_rs(ifp->idev->dev, &ifp->addr, &in6addr_linklocal_allrouters); 2786 ndisc_send_rs(idev->dev, &ifp->addr, &in6addr_linklocal_allrouters);
2749 } else { 2787 } else {
2750 spin_unlock(&ifp->lock); 2788 spin_unlock(&ifp->lock);
2751 /* 2789 /*
@@ -2753,10 +2791,11 @@ static void addrconf_rs_timer(unsigned long data)
2753 * assumption any longer. 2791 * assumption any longer.
2754 */ 2792 */
2755 printk(KERN_DEBUG "%s: no IPv6 routers present\n", 2793 printk(KERN_DEBUG "%s: no IPv6 routers present\n",
2756 ifp->idev->dev->name); 2794 idev->dev->name);
2757 } 2795 }
2758 2796
2759out: 2797out:
2798 read_unlock(&idev->lock);
2760 in6_ifa_put(ifp); 2799 in6_ifa_put(ifp);
2761} 2800}
2762 2801
@@ -2789,14 +2828,14 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
2789 read_lock_bh(&idev->lock); 2828 read_lock_bh(&idev->lock);
2790 if (ifp->dead) 2829 if (ifp->dead)
2791 goto out; 2830 goto out;
2792 spin_lock_bh(&ifp->lock);
2793 2831
2832 spin_lock(&ifp->lock);
2794 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || 2833 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
2795 idev->cnf.accept_dad < 1 || 2834 idev->cnf.accept_dad < 1 ||
2796 !(ifp->flags&IFA_F_TENTATIVE) || 2835 !(ifp->flags&IFA_F_TENTATIVE) ||
2797 ifp->flags & IFA_F_NODAD) { 2836 ifp->flags & IFA_F_NODAD) {
2798 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); 2837 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
2799 spin_unlock_bh(&ifp->lock); 2838 spin_unlock(&ifp->lock);
2800 read_unlock_bh(&idev->lock); 2839 read_unlock_bh(&idev->lock);
2801 2840
2802 addrconf_dad_completed(ifp); 2841 addrconf_dad_completed(ifp);
@@ -2804,7 +2843,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
2804 } 2843 }
2805 2844
2806 if (!(idev->if_flags & IF_READY)) { 2845 if (!(idev->if_flags & IF_READY)) {
2807 spin_unlock_bh(&ifp->lock); 2846 spin_unlock(&ifp->lock);
2808 read_unlock_bh(&idev->lock); 2847 read_unlock_bh(&idev->lock);
2809 /* 2848 /*
2810 * If the device is not ready: 2849 * If the device is not ready:
@@ -2824,7 +2863,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
2824 ip6_ins_rt(ifp->rt); 2863 ip6_ins_rt(ifp->rt);
2825 2864
2826 addrconf_dad_kick(ifp); 2865 addrconf_dad_kick(ifp);
2827 spin_unlock_bh(&ifp->lock); 2866 spin_unlock(&ifp->lock);
2828out: 2867out:
2829 read_unlock_bh(&idev->lock); 2868 read_unlock_bh(&idev->lock);
2830} 2869}
@@ -2835,20 +2874,21 @@ static void addrconf_dad_timer(unsigned long data)
2835 struct inet6_dev *idev = ifp->idev; 2874 struct inet6_dev *idev = ifp->idev;
2836 struct in6_addr mcaddr; 2875 struct in6_addr mcaddr;
2837 2876
2838 read_lock_bh(&idev->lock); 2877 read_lock(&idev->lock);
2839 if (idev->dead) { 2878 if (idev->dead || !(idev->if_flags & IF_READY)) {
2840 read_unlock_bh(&idev->lock); 2879 read_unlock(&idev->lock);
2841 goto out; 2880 goto out;
2842 } 2881 }
2843 spin_lock_bh(&ifp->lock); 2882
2883 spin_lock(&ifp->lock);
2844 if (ifp->probes == 0) { 2884 if (ifp->probes == 0) {
2845 /* 2885 /*
2846 * DAD was successful 2886 * DAD was successful
2847 */ 2887 */
2848 2888
2849 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); 2889 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
2850 spin_unlock_bh(&ifp->lock); 2890 spin_unlock(&ifp->lock);
2851 read_unlock_bh(&idev->lock); 2891 read_unlock(&idev->lock);
2852 2892
2853 addrconf_dad_completed(ifp); 2893 addrconf_dad_completed(ifp);
2854 2894
@@ -2857,8 +2897,8 @@ static void addrconf_dad_timer(unsigned long data)
2857 2897
2858 ifp->probes--; 2898 ifp->probes--;
2859 addrconf_mod_timer(ifp, AC_DAD, ifp->idev->nd_parms->retrans_time); 2899 addrconf_mod_timer(ifp, AC_DAD, ifp->idev->nd_parms->retrans_time);
2860 spin_unlock_bh(&ifp->lock); 2900 spin_unlock(&ifp->lock);
2861 read_unlock_bh(&idev->lock); 2901 read_unlock(&idev->lock);
2862 2902
2863 /* send a neighbour solicitation for our addr */ 2903 /* send a neighbour solicitation for our addr */
2864 addrconf_addr_solict_mult(&ifp->addr, &mcaddr); 2904 addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
@@ -2905,12 +2945,12 @@ static void addrconf_dad_run(struct inet6_dev *idev) {
2905 2945
2906 read_lock_bh(&idev->lock); 2946 read_lock_bh(&idev->lock);
2907 for (ifp = idev->addr_list; ifp; ifp = ifp->if_next) { 2947 for (ifp = idev->addr_list; ifp; ifp = ifp->if_next) {
2908 spin_lock_bh(&ifp->lock); 2948 spin_lock(&ifp->lock);
2909 if (!(ifp->flags & IFA_F_TENTATIVE)) { 2949 if (!(ifp->flags & IFA_F_TENTATIVE)) {
2910 spin_unlock_bh(&ifp->lock); 2950 spin_unlock(&ifp->lock);
2911 continue; 2951 continue;
2912 } 2952 }
2913 spin_unlock_bh(&ifp->lock); 2953 spin_unlock(&ifp->lock);
2914 addrconf_dad_kick(ifp); 2954 addrconf_dad_kick(ifp);
2915 } 2955 }
2916 read_unlock_bh(&idev->lock); 2956 read_unlock_bh(&idev->lock);
@@ -3027,14 +3067,14 @@ static const struct file_operations if6_fops = {
3027 .release = seq_release_net, 3067 .release = seq_release_net,
3028}; 3068};
3029 3069
3030static int if6_proc_net_init(struct net *net) 3070static int __net_init if6_proc_net_init(struct net *net)
3031{ 3071{
3032 if (!proc_net_fops_create(net, "if_inet6", S_IRUGO, &if6_fops)) 3072 if (!proc_net_fops_create(net, "if_inet6", S_IRUGO, &if6_fops))
3033 return -ENOMEM; 3073 return -ENOMEM;
3034 return 0; 3074 return 0;
3035} 3075}
3036 3076
3037static void if6_proc_net_exit(struct net *net) 3077static void __net_exit if6_proc_net_exit(struct net *net)
3038{ 3078{
3039 proc_net_remove(net, "if_inet6"); 3079 proc_net_remove(net, "if_inet6");
3040} 3080}
@@ -3570,7 +3610,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
3570 hlist_for_each_entry_rcu(dev, node, head, index_hlist) { 3610 hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
3571 if (idx < s_idx) 3611 if (idx < s_idx)
3572 goto cont; 3612 goto cont;
3573 if (idx > s_idx) 3613 if (h > s_h || idx > s_idx)
3574 s_ip_idx = 0; 3614 s_ip_idx = 0;
3575 ip_idx = 0; 3615 ip_idx = 0;
3576 if ((idev = __in6_dev_get(dev)) == NULL) 3616 if ((idev = __in6_dev_get(dev)) == NULL)
@@ -3752,8 +3792,8 @@ static inline size_t inet6_if_nlmsg_size(void)
3752 ); 3792 );
3753} 3793}
3754 3794
3755static inline void __snmp6_fill_stats(u64 *stats, void **mib, int items, 3795static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib,
3756 int bytes) 3796 int items, int bytes)
3757{ 3797{
3758 int i; 3798 int i;
3759 int pad = bytes - sizeof(u64) * items; 3799 int pad = bytes - sizeof(u64) * items;
@@ -3772,10 +3812,10 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
3772{ 3812{
3773 switch(attrtype) { 3813 switch(attrtype) {
3774 case IFLA_INET6_STATS: 3814 case IFLA_INET6_STATS:
3775 __snmp6_fill_stats(stats, (void **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes); 3815 __snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes);
3776 break; 3816 break;
3777 case IFLA_INET6_ICMP6STATS: 3817 case IFLA_INET6_ICMP6STATS:
3778 __snmp6_fill_stats(stats, (void **)idev->stats.icmpv6, ICMP6_MIB_MAX, bytes); 3818 __snmp6_fill_stats(stats, (void __percpu **)idev->stats.icmpv6, ICMP6_MIB_MAX, bytes);
3779 break; 3819 break;
3780 } 3820 }
3781} 3821}
@@ -4028,12 +4068,15 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
4028{ 4068{
4029 int *valp = ctl->data; 4069 int *valp = ctl->data;
4030 int val = *valp; 4070 int val = *valp;
4071 loff_t pos = *ppos;
4031 int ret; 4072 int ret;
4032 4073
4033 ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 4074 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
4034 4075
4035 if (write) 4076 if (write)
4036 ret = addrconf_fixup_forwarding(ctl, valp, val); 4077 ret = addrconf_fixup_forwarding(ctl, valp, val);
4078 if (ret)
4079 *ppos = pos;
4037 return ret; 4080 return ret;
4038} 4081}
4039 4082
@@ -4075,8 +4118,11 @@ static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
4075 if (p == &net->ipv6.devconf_dflt->disable_ipv6) 4118 if (p == &net->ipv6.devconf_dflt->disable_ipv6)
4076 return 0; 4119 return 0;
4077 4120
4078 if (!rtnl_trylock()) 4121 if (!rtnl_trylock()) {
4122 /* Restore the original values before restarting */
4123 *p = old;
4079 return restart_syscall(); 4124 return restart_syscall();
4125 }
4080 4126
4081 if (p == &net->ipv6.devconf_all->disable_ipv6) { 4127 if (p == &net->ipv6.devconf_all->disable_ipv6) {
4082 __s32 newf = net->ipv6.devconf_all->disable_ipv6; 4128 __s32 newf = net->ipv6.devconf_all->disable_ipv6;
@@ -4095,12 +4141,15 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
4095{ 4141{
4096 int *valp = ctl->data; 4142 int *valp = ctl->data;
4097 int val = *valp; 4143 int val = *valp;
4144 loff_t pos = *ppos;
4098 int ret; 4145 int ret;
4099 4146
4100 ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 4147 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
4101 4148
4102 if (write) 4149 if (write)
4103 ret = addrconf_disable_ipv6(ctl, valp, val); 4150 ret = addrconf_disable_ipv6(ctl, valp, val);
4151 if (ret)
4152 *ppos = pos;
4104 return ret; 4153 return ret;
4105} 4154}
4106 4155
@@ -4402,8 +4451,7 @@ static void __addrconf_sysctl_unregister(struct ipv6_devconf *p)
4402 4451
4403static void addrconf_sysctl_register(struct inet6_dev *idev) 4452static void addrconf_sysctl_register(struct inet6_dev *idev)
4404{ 4453{
4405 neigh_sysctl_register(idev->dev, idev->nd_parms, NET_IPV6, 4454 neigh_sysctl_register(idev->dev, idev->nd_parms, "ipv6",
4406 NET_IPV6_NEIGH, "ipv6",
4407 &ndisc_ifinfo_sysctl_change); 4455 &ndisc_ifinfo_sysctl_change);
4408 __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name, 4456 __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
4409 idev, &idev->cnf); 4457 idev, &idev->cnf);
@@ -4418,7 +4466,7 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev)
4418 4466
4419#endif 4467#endif
4420 4468
4421static int addrconf_init_net(struct net *net) 4469static int __net_init addrconf_init_net(struct net *net)
4422{ 4470{
4423 int err; 4471 int err;
4424 struct ipv6_devconf *all, *dflt; 4472 struct ipv6_devconf *all, *dflt;
@@ -4467,7 +4515,7 @@ err_alloc_all:
4467 return err; 4515 return err;
4468} 4516}
4469 4517
4470static void addrconf_exit_net(struct net *net) 4518static void __net_exit addrconf_exit_net(struct net *net)
4471{ 4519{
4472#ifdef CONFIG_SYSCTL 4520#ifdef CONFIG_SYSCTL
4473 __addrconf_sysctl_unregister(net->ipv6.devconf_dflt); 4521 __addrconf_sysctl_unregister(net->ipv6.devconf_dflt);
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 3f82e9542eda..6b03826552e1 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -72,7 +72,7 @@ int __ipv6_addr_type(const struct in6_addr *addr)
72 IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.3 */ 72 IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.3 */
73 } 73 }
74 74
75 return (IPV6_ADDR_RESERVED | 75 return (IPV6_ADDR_UNICAST |
76 IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.4 */ 76 IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.4 */
77} 77}
78EXPORT_SYMBOL(__ipv6_addr_type); 78EXPORT_SYMBOL(__ipv6_addr_type);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 12e69d364dd5..37d14e735c27 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -971,41 +971,41 @@ static void ipv6_packet_cleanup(void)
971 971
972static int __net_init ipv6_init_mibs(struct net *net) 972static int __net_init ipv6_init_mibs(struct net *net)
973{ 973{
974 if (snmp_mib_init((void **)net->mib.udp_stats_in6, 974 if (snmp_mib_init((void __percpu **)net->mib.udp_stats_in6,
975 sizeof (struct udp_mib)) < 0) 975 sizeof (struct udp_mib)) < 0)
976 return -ENOMEM; 976 return -ENOMEM;
977 if (snmp_mib_init((void **)net->mib.udplite_stats_in6, 977 if (snmp_mib_init((void __percpu **)net->mib.udplite_stats_in6,
978 sizeof (struct udp_mib)) < 0) 978 sizeof (struct udp_mib)) < 0)
979 goto err_udplite_mib; 979 goto err_udplite_mib;
980 if (snmp_mib_init((void **)net->mib.ipv6_statistics, 980 if (snmp_mib_init((void __percpu **)net->mib.ipv6_statistics,
981 sizeof(struct ipstats_mib)) < 0) 981 sizeof(struct ipstats_mib)) < 0)
982 goto err_ip_mib; 982 goto err_ip_mib;
983 if (snmp_mib_init((void **)net->mib.icmpv6_statistics, 983 if (snmp_mib_init((void __percpu **)net->mib.icmpv6_statistics,
984 sizeof(struct icmpv6_mib)) < 0) 984 sizeof(struct icmpv6_mib)) < 0)
985 goto err_icmp_mib; 985 goto err_icmp_mib;
986 if (snmp_mib_init((void **)net->mib.icmpv6msg_statistics, 986 if (snmp_mib_init((void __percpu **)net->mib.icmpv6msg_statistics,
987 sizeof(struct icmpv6msg_mib)) < 0) 987 sizeof(struct icmpv6msg_mib)) < 0)
988 goto err_icmpmsg_mib; 988 goto err_icmpmsg_mib;
989 return 0; 989 return 0;
990 990
991err_icmpmsg_mib: 991err_icmpmsg_mib:
992 snmp_mib_free((void **)net->mib.icmpv6_statistics); 992 snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics);
993err_icmp_mib: 993err_icmp_mib:
994 snmp_mib_free((void **)net->mib.ipv6_statistics); 994 snmp_mib_free((void __percpu **)net->mib.ipv6_statistics);
995err_ip_mib: 995err_ip_mib:
996 snmp_mib_free((void **)net->mib.udplite_stats_in6); 996 snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6);
997err_udplite_mib: 997err_udplite_mib:
998 snmp_mib_free((void **)net->mib.udp_stats_in6); 998 snmp_mib_free((void __percpu **)net->mib.udp_stats_in6);
999 return -ENOMEM; 999 return -ENOMEM;
1000} 1000}
1001 1001
1002static void __net_exit ipv6_cleanup_mibs(struct net *net) 1002static void ipv6_cleanup_mibs(struct net *net)
1003{ 1003{
1004 snmp_mib_free((void **)net->mib.udp_stats_in6); 1004 snmp_mib_free((void __percpu **)net->mib.udp_stats_in6);
1005 snmp_mib_free((void **)net->mib.udplite_stats_in6); 1005 snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6);
1006 snmp_mib_free((void **)net->mib.ipv6_statistics); 1006 snmp_mib_free((void __percpu **)net->mib.ipv6_statistics);
1007 snmp_mib_free((void **)net->mib.icmpv6_statistics); 1007 snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics);
1008 snmp_mib_free((void **)net->mib.icmpv6msg_statistics); 1008 snmp_mib_free((void __percpu **)net->mib.icmpv6msg_statistics);
1009} 1009}
1010 1010
1011static int __net_init inet6_net_init(struct net *net) 1011static int __net_init inet6_net_init(struct net *net)
@@ -1042,7 +1042,7 @@ out:
1042#endif 1042#endif
1043} 1043}
1044 1044
1045static void inet6_net_exit(struct net *net) 1045static void __net_exit inet6_net_exit(struct net *net)
1046{ 1046{
1047#ifdef CONFIG_PROC_FS 1047#ifdef CONFIG_PROC_FS
1048 udp6_proc_exit(net); 1048 udp6_proc_exit(net);
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index c2f300c314be..5ac89025f9de 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -614,7 +614,7 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
614 type != ICMPV6_PKT_TOOBIG) 614 type != ICMPV6_PKT_TOOBIG)
615 return; 615 return;
616 616
617 x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6); 617 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6);
618 if (!x) 618 if (!x)
619 return; 619 return;
620 620
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index f1c74c8ef9de..c4f6ca32fa74 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -538,7 +538,7 @@ static const struct file_operations ac6_seq_fops = {
538 .release = seq_release_net, 538 .release = seq_release_net,
539}; 539};
540 540
541int ac6_proc_init(struct net *net) 541int __net_init ac6_proc_init(struct net *net)
542{ 542{
543 if (!proc_net_fops_create(net, "anycast6", S_IRUGO, &ac6_seq_fops)) 543 if (!proc_net_fops_create(net, "anycast6", S_IRUGO, &ac6_seq_fops))
544 return -ENOMEM; 544 return -ENOMEM;
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 668a46b655e6..ee9b93bdd6a2 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -365,7 +365,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
365 type != ICMPV6_PKT_TOOBIG) 365 type != ICMPV6_PKT_TOOBIG)
366 return; 366 return;
367 367
368 x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6); 368 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6);
369 if (!x) 369 if (!x)
370 return; 370 return;
371 printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n", 371 printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n",
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index df159fffe4bc..074f2c084f9f 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -481,7 +481,7 @@ looped_back:
481 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), 481 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
482 IPSTATS_MIB_INHDRERRORS); 482 IPSTATS_MIB_INHDRERRORS);
483 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 483 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
484 0, skb->dev); 484 0);
485 kfree_skb(skb); 485 kfree_skb(skb);
486 return -1; 486 return -1;
487 } 487 }
@@ -559,6 +559,11 @@ static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
559 return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev); 559 return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev);
560} 560}
561 561
562static inline struct net *ipv6_skb_net(struct sk_buff *skb)
563{
564 return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
565}
566
562/* Router Alert as of RFC 2711 */ 567/* Router Alert as of RFC 2711 */
563 568
564static int ipv6_hop_ra(struct sk_buff *skb, int optoff) 569static int ipv6_hop_ra(struct sk_buff *skb, int optoff)
@@ -580,8 +585,8 @@ static int ipv6_hop_ra(struct sk_buff *skb, int optoff)
580static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff) 585static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
581{ 586{
582 const unsigned char *nh = skb_network_header(skb); 587 const unsigned char *nh = skb_network_header(skb);
588 struct net *net = ipv6_skb_net(skb);
583 u32 pkt_len; 589 u32 pkt_len;
584 struct net *net = dev_net(skb_dst(skb)->dev);
585 590
586 if (nh[optoff + 1] != 4 || (optoff & 3) != 2) { 591 if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
587 LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", 592 LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index b7aa7c64cc4a..5e463c43fcc2 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -84,18 +84,11 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
84 if ((rule->flags & FIB_RULE_FIND_SADDR) && 84 if ((rule->flags & FIB_RULE_FIND_SADDR) &&
85 r->src.plen && !(flags & RT6_LOOKUP_F_HAS_SADDR)) { 85 r->src.plen && !(flags & RT6_LOOKUP_F_HAS_SADDR)) {
86 struct in6_addr saddr; 86 struct in6_addr saddr;
87 unsigned int srcprefs = 0;
88
89 if (flags & RT6_LOOKUP_F_SRCPREF_TMP)
90 srcprefs |= IPV6_PREFER_SRC_TMP;
91 if (flags & RT6_LOOKUP_F_SRCPREF_PUBLIC)
92 srcprefs |= IPV6_PREFER_SRC_PUBLIC;
93 if (flags & RT6_LOOKUP_F_SRCPREF_COA)
94 srcprefs |= IPV6_PREFER_SRC_COA;
95 87
96 if (ipv6_dev_get_saddr(net, 88 if (ipv6_dev_get_saddr(net,
97 ip6_dst_idev(&rt->u.dst)->dev, 89 ip6_dst_idev(&rt->u.dst)->dev,
98 &flp->fl6_dst, srcprefs, 90 &flp->fl6_dst,
91 rt6_flags2srcprefs(flags),
99 &saddr)) 92 &saddr))
100 goto again; 93 goto again;
101 if (!ipv6_prefix_equal(&saddr, &r->src.addr, 94 if (!ipv6_prefix_equal(&saddr, &r->src.addr,
@@ -262,7 +255,7 @@ static struct fib_rules_ops fib6_rules_ops_template = {
262 .fro_net = &init_net, 255 .fro_net = &init_net,
263}; 256};
264 257
265static int fib6_rules_net_init(struct net *net) 258static int __net_init fib6_rules_net_init(struct net *net)
266{ 259{
267 struct fib_rules_ops *ops; 260 struct fib_rules_ops *ops;
268 int err = -ENOMEM; 261 int err = -ENOMEM;
@@ -291,7 +284,7 @@ out_fib6_rules_ops:
291 goto out; 284 goto out;
292} 285}
293 286
294static void fib6_rules_net_exit(struct net *net) 287static void __net_exit fib6_rules_net_exit(struct net *net)
295{ 288{
296 fib_rules_unregister(net->ipv6.fib6_rules_ops); 289 fib_rules_unregister(net->ipv6.fib6_rules_ops);
297} 290}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 4ae661bc3677..eb9abe24bdf0 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -67,11 +67,6 @@
67#include <asm/uaccess.h> 67#include <asm/uaccess.h>
68#include <asm/system.h> 68#include <asm/system.h>
69 69
70DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
71EXPORT_SYMBOL(icmpv6_statistics);
72DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics) __read_mostly;
73EXPORT_SYMBOL(icmpv6msg_statistics);
74
75/* 70/*
76 * The ICMP socket(s). This is the most convenient way to flow control 71 * The ICMP socket(s). This is the most convenient way to flow control
77 * our ICMP output as well as maintain a clean interface throughout 72 * our ICMP output as well as maintain a clean interface throughout
@@ -119,7 +114,7 @@ static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
119 */ 114 */
120void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos) 115void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
121{ 116{
122 icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev); 117 icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos);
123 kfree_skb(skb); 118 kfree_skb(skb);
124} 119}
125 120
@@ -305,8 +300,7 @@ static inline void mip6_addr_swap(struct sk_buff *skb) {}
305/* 300/*
306 * Send an ICMP message in response to a packet in error 301 * Send an ICMP message in response to a packet in error
307 */ 302 */
308void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, 303void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
309 struct net_device *dev)
310{ 304{
311 struct net *net = dev_net(skb->dev); 305 struct net *net = dev_net(skb->dev);
312 struct inet6_dev *idev = NULL; 306 struct inet6_dev *idev = NULL;
@@ -951,7 +945,7 @@ ctl_table ipv6_icmp_table_template[] = {
951 { }, 945 { },
952}; 946};
953 947
954struct ctl_table *ipv6_icmp_sysctl_init(struct net *net) 948struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
955{ 949{
956 struct ctl_table *table; 950 struct ctl_table *table;
957 951
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 0e93ca56eb69..2f9847924fa5 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -93,29 +93,20 @@ static __u32 rt_sernum;
93 93
94static void fib6_gc_timer_cb(unsigned long arg); 94static void fib6_gc_timer_cb(unsigned long arg);
95 95
96static struct fib6_walker_t fib6_walker_list = { 96static LIST_HEAD(fib6_walkers);
97 .prev = &fib6_walker_list, 97#define FOR_WALKERS(w) list_for_each_entry(w, &fib6_walkers, lh)
98 .next = &fib6_walker_list,
99};
100
101#define FOR_WALKERS(w) for ((w)=fib6_walker_list.next; (w) != &fib6_walker_list; (w)=(w)->next)
102 98
103static inline void fib6_walker_link(struct fib6_walker_t *w) 99static inline void fib6_walker_link(struct fib6_walker_t *w)
104{ 100{
105 write_lock_bh(&fib6_walker_lock); 101 write_lock_bh(&fib6_walker_lock);
106 w->next = fib6_walker_list.next; 102 list_add(&w->lh, &fib6_walkers);
107 w->prev = &fib6_walker_list;
108 w->next->prev = w;
109 w->prev->next = w;
110 write_unlock_bh(&fib6_walker_lock); 103 write_unlock_bh(&fib6_walker_lock);
111} 104}
112 105
113static inline void fib6_walker_unlink(struct fib6_walker_t *w) 106static inline void fib6_walker_unlink(struct fib6_walker_t *w)
114{ 107{
115 write_lock_bh(&fib6_walker_lock); 108 write_lock_bh(&fib6_walker_lock);
116 w->next->prev = w->prev; 109 list_del(&w->lh);
117 w->prev->next = w->next;
118 w->prev = w->next = w;
119 write_unlock_bh(&fib6_walker_lock); 110 write_unlock_bh(&fib6_walker_lock);
120} 111}
121static __inline__ u32 fib6_new_sernum(void) 112static __inline__ u32 fib6_new_sernum(void)
@@ -239,7 +230,7 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
239 return NULL; 230 return NULL;
240} 231}
241 232
242static void fib6_tables_init(struct net *net) 233static void __net_init fib6_tables_init(struct net *net)
243{ 234{
244 fib6_link_table(net, net->ipv6.fib6_main_tbl); 235 fib6_link_table(net, net->ipv6.fib6_main_tbl);
245 fib6_link_table(net, net->ipv6.fib6_local_tbl); 236 fib6_link_table(net, net->ipv6.fib6_local_tbl);
@@ -262,7 +253,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl,
262 return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl, flags); 253 return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl, flags);
263} 254}
264 255
265static void fib6_tables_init(struct net *net) 256static void __net_init fib6_tables_init(struct net *net)
266{ 257{
267 fib6_link_table(net, net->ipv6.fib6_main_tbl); 258 fib6_link_table(net, net->ipv6.fib6_main_tbl);
268} 259}
@@ -319,12 +310,26 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
319 w->root = &table->tb6_root; 310 w->root = &table->tb6_root;
320 311
321 if (cb->args[4] == 0) { 312 if (cb->args[4] == 0) {
313 w->count = 0;
314 w->skip = 0;
315
322 read_lock_bh(&table->tb6_lock); 316 read_lock_bh(&table->tb6_lock);
323 res = fib6_walk(w); 317 res = fib6_walk(w);
324 read_unlock_bh(&table->tb6_lock); 318 read_unlock_bh(&table->tb6_lock);
325 if (res > 0) 319 if (res > 0) {
326 cb->args[4] = 1; 320 cb->args[4] = 1;
321 cb->args[5] = w->root->fn_sernum;
322 }
327 } else { 323 } else {
324 if (cb->args[5] != w->root->fn_sernum) {
325 /* Begin at the root if the tree changed */
326 cb->args[5] = w->root->fn_sernum;
327 w->state = FWS_INIT;
328 w->node = w->root;
329 w->skip = w->count;
330 } else
331 w->skip = 0;
332
328 read_lock_bh(&table->tb6_lock); 333 read_lock_bh(&table->tb6_lock);
329 res = fib6_walk_continue(w); 334 res = fib6_walk_continue(w);
330 read_unlock_bh(&table->tb6_lock); 335 read_unlock_bh(&table->tb6_lock);
@@ -1250,9 +1255,18 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
1250 w->leaf = fn->leaf; 1255 w->leaf = fn->leaf;
1251 case FWS_C: 1256 case FWS_C:
1252 if (w->leaf && fn->fn_flags&RTN_RTINFO) { 1257 if (w->leaf && fn->fn_flags&RTN_RTINFO) {
1253 int err = w->func(w); 1258 int err;
1259
1260 if (w->count < w->skip) {
1261 w->count++;
1262 continue;
1263 }
1264
1265 err = w->func(w);
1254 if (err) 1266 if (err)
1255 return err; 1267 return err;
1268
1269 w->count++;
1256 continue; 1270 continue;
1257 } 1271 }
1258 w->state = FWS_U; 1272 w->state = FWS_U;
@@ -1346,6 +1360,8 @@ static void fib6_clean_tree(struct net *net, struct fib6_node *root,
1346 c.w.root = root; 1360 c.w.root = root;
1347 c.w.func = fib6_clean_node; 1361 c.w.func = fib6_clean_node;
1348 c.w.prune = prune; 1362 c.w.prune = prune;
1363 c.w.count = 0;
1364 c.w.skip = 0;
1349 c.func = func; 1365 c.func = func;
1350 c.arg = arg; 1366 c.arg = arg;
1351 c.net = net; 1367 c.net = net;
@@ -1469,7 +1485,7 @@ static void fib6_gc_timer_cb(unsigned long arg)
1469 fib6_run_gc(0, (struct net *)arg); 1485 fib6_run_gc(0, (struct net *)arg);
1470} 1486}
1471 1487
1472static int fib6_net_init(struct net *net) 1488static int __net_init fib6_net_init(struct net *net)
1473{ 1489{
1474 setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net); 1490 setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net);
1475 1491
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 6e7bffa2205e..e41eba8aacf1 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -154,7 +154,7 @@ static void ip6_fl_gc(unsigned long dummy)
154 write_unlock(&ip6_fl_lock); 154 write_unlock(&ip6_fl_lock);
155} 155}
156 156
157static void ip6_fl_purge(struct net *net) 157static void __net_exit ip6_fl_purge(struct net *net)
158{ 158{
159 int i; 159 int i;
160 160
@@ -735,7 +735,7 @@ static const struct file_operations ip6fl_seq_fops = {
735 .release = seq_release_net, 735 .release = seq_release_net,
736}; 736};
737 737
738static int ip6_flowlabel_proc_init(struct net *net) 738static int __net_init ip6_flowlabel_proc_init(struct net *net)
739{ 739{
740 if (!proc_net_fops_create(net, "ip6_flowlabel", 740 if (!proc_net_fops_create(net, "ip6_flowlabel",
741 S_IRUGO, &ip6fl_seq_fops)) 741 S_IRUGO, &ip6fl_seq_fops))
@@ -743,7 +743,7 @@ static int ip6_flowlabel_proc_init(struct net *net)
743 return 0; 743 return 0;
744} 744}
745 745
746static void ip6_flowlabel_proc_fini(struct net *net) 746static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
747{ 747{
748 proc_net_remove(net, "ip6_flowlabel"); 748 proc_net_remove(net, "ip6_flowlabel");
749} 749}
@@ -754,11 +754,10 @@ static inline int ip6_flowlabel_proc_init(struct net *net)
754} 754}
755static inline void ip6_flowlabel_proc_fini(struct net *net) 755static inline void ip6_flowlabel_proc_fini(struct net *net)
756{ 756{
757 return ;
758} 757}
759#endif 758#endif
760 759
761static inline void ip6_flowlabel_net_exit(struct net *net) 760static void __net_exit ip6_flowlabel_net_exit(struct net *net)
762{ 761{
763 ip6_fl_purge(net); 762 ip6_fl_purge(net);
764 ip6_flowlabel_proc_fini(net); 763 ip6_flowlabel_proc_fini(net);
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 237e2dba6e94..e28f9203deca 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -216,8 +216,7 @@ resubmit:
216 IP6_INC_STATS_BH(net, idev, 216 IP6_INC_STATS_BH(net, idev,
217 IPSTATS_MIB_INUNKNOWNPROTOS); 217 IPSTATS_MIB_INUNKNOWNPROTOS);
218 icmpv6_send(skb, ICMPV6_PARAMPROB, 218 icmpv6_send(skb, ICMPV6_PARAMPROB,
219 ICMPV6_UNK_NEXTHDR, nhoff, 219 ICMPV6_UNK_NEXTHDR, nhoff);
220 skb->dev);
221 } 220 }
222 } else 221 } else
223 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS); 222 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index cd48801a8d6f..dabf108ad811 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -121,10 +121,9 @@ static int ip6_output2(struct sk_buff *skb)
121 skb->dev = dev; 121 skb->dev = dev;
122 122
123 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { 123 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
124 struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL;
125 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); 124 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
126 125
127 if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) && 126 if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
128 ((mroute6_socket(dev_net(dev)) && 127 ((mroute6_socket(dev_net(dev)) &&
129 !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || 128 !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
130 ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, 129 ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
@@ -268,7 +267,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
268 if (net_ratelimit()) 267 if (net_ratelimit())
269 printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n"); 268 printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
270 skb->dev = dst->dev; 269 skb->dev = dst->dev;
271 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 270 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
272 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); 271 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
273 kfree_skb(skb); 272 kfree_skb(skb);
274 return -EMSGSIZE; 273 return -EMSGSIZE;
@@ -403,6 +402,7 @@ int ip6_forward(struct sk_buff *skb)
403 struct ipv6hdr *hdr = ipv6_hdr(skb); 402 struct ipv6hdr *hdr = ipv6_hdr(skb);
404 struct inet6_skb_parm *opt = IP6CB(skb); 403 struct inet6_skb_parm *opt = IP6CB(skb);
405 struct net *net = dev_net(dst->dev); 404 struct net *net = dev_net(dst->dev);
405 u32 mtu;
406 406
407 if (net->ipv6.devconf_all->forwarding == 0) 407 if (net->ipv6.devconf_all->forwarding == 0)
408 goto error; 408 goto error;
@@ -442,8 +442,7 @@ int ip6_forward(struct sk_buff *skb)
442 if (hdr->hop_limit <= 1) { 442 if (hdr->hop_limit <= 1) {
443 /* Force OUTPUT device used as source address */ 443 /* Force OUTPUT device used as source address */
444 skb->dev = dst->dev; 444 skb->dev = dst->dev;
445 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 445 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
446 0, skb->dev);
447 IP6_INC_STATS_BH(net, 446 IP6_INC_STATS_BH(net,
448 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); 447 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
449 448
@@ -505,15 +504,19 @@ int ip6_forward(struct sk_buff *skb)
505 goto error; 504 goto error;
506 if (addrtype & IPV6_ADDR_LINKLOCAL) { 505 if (addrtype & IPV6_ADDR_LINKLOCAL) {
507 icmpv6_send(skb, ICMPV6_DEST_UNREACH, 506 icmpv6_send(skb, ICMPV6_DEST_UNREACH,
508 ICMPV6_NOT_NEIGHBOUR, 0, skb->dev); 507 ICMPV6_NOT_NEIGHBOUR, 0);
509 goto error; 508 goto error;
510 } 509 }
511 } 510 }
512 511
513 if (skb->len > dst_mtu(dst)) { 512 mtu = dst_mtu(dst);
513 if (mtu < IPV6_MIN_MTU)
514 mtu = IPV6_MIN_MTU;
515
516 if (skb->len > mtu) {
514 /* Again, force OUTPUT device used as source address */ 517 /* Again, force OUTPUT device used as source address */
515 skb->dev = dst->dev; 518 skb->dev = dst->dev;
516 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev); 519 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
517 IP6_INC_STATS_BH(net, 520 IP6_INC_STATS_BH(net,
518 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS); 521 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
519 IP6_INC_STATS_BH(net, 522 IP6_INC_STATS_BH(net,
@@ -623,12 +626,11 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
623 mtu = ip6_skb_dst_mtu(skb); 626 mtu = ip6_skb_dst_mtu(skb);
624 627
625 /* We must not fragment if the socket is set to force MTU discovery 628 /* We must not fragment if the socket is set to force MTU discovery
626 * or if the skb it not generated by a local socket. (This last 629 * or if the skb it not generated by a local socket.
627 * check should be redundant, but it's free.)
628 */ 630 */
629 if (!skb->local_df) { 631 if (!skb->local_df) {
630 skb->dev = skb_dst(skb)->dev; 632 skb->dev = skb_dst(skb)->dev;
631 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 633 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
632 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), 634 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
633 IPSTATS_MIB_FRAGFAILS); 635 IPSTATS_MIB_FRAGFAILS);
634 kfree_skb(skb); 636 kfree_skb(skb);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index d453d07b0dfe..138980eec214 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -74,7 +74,6 @@ MODULE_LICENSE("GPL");
74 (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \ 74 (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \
75 (HASH_SIZE - 1)) 75 (HASH_SIZE - 1))
76 76
77static void ip6_fb_tnl_dev_init(struct net_device *dev);
78static void ip6_tnl_dev_init(struct net_device *dev); 77static void ip6_tnl_dev_init(struct net_device *dev);
79static void ip6_tnl_dev_setup(struct net_device *dev); 78static void ip6_tnl_dev_setup(struct net_device *dev);
80 79
@@ -623,7 +622,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
623 if (rt && rt->rt6i_dev) 622 if (rt && rt->rt6i_dev)
624 skb2->dev = rt->rt6i_dev; 623 skb2->dev = rt->rt6i_dev;
625 624
626 icmpv6_send(skb2, rel_type, rel_code, rel_info, skb2->dev); 625 icmpv6_send(skb2, rel_type, rel_code, rel_info);
627 626
628 if (rt) 627 if (rt)
629 dst_release(&rt->u.dst); 628 dst_release(&rt->u.dst);
@@ -1015,7 +1014,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1015 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset]; 1014 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
1016 if (tel->encap_limit == 0) { 1015 if (tel->encap_limit == 0) {
1017 icmpv6_send(skb, ICMPV6_PARAMPROB, 1016 icmpv6_send(skb, ICMPV6_PARAMPROB,
1018 ICMPV6_HDR_FIELD, offset + 2, skb->dev); 1017 ICMPV6_HDR_FIELD, offset + 2);
1019 return -1; 1018 return -1;
1020 } 1019 }
1021 encap_limit = tel->encap_limit - 1; 1020 encap_limit = tel->encap_limit - 1;
@@ -1034,7 +1033,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1034 err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu); 1033 err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu);
1035 if (err != 0) { 1034 if (err != 0) {
1036 if (err == -EMSGSIZE) 1035 if (err == -EMSGSIZE)
1037 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); 1036 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1038 return -1; 1037 return -1;
1039 } 1038 }
1040 1039
@@ -1364,7 +1363,7 @@ static void ip6_tnl_dev_init(struct net_device *dev)
1364 * Return: 0 1363 * Return: 0
1365 **/ 1364 **/
1366 1365
1367static void ip6_fb_tnl_dev_init(struct net_device *dev) 1366static void __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1368{ 1367{
1369 struct ip6_tnl *t = netdev_priv(dev); 1368 struct ip6_tnl *t = netdev_priv(dev);
1370 struct net *net = dev_net(dev); 1369 struct net *net = dev_net(dev);
@@ -1388,7 +1387,7 @@ static struct xfrm6_tunnel ip6ip6_handler = {
1388 .priority = 1, 1387 .priority = 1,
1389}; 1388};
1390 1389
1391static void ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n) 1390static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
1392{ 1391{
1393 int h; 1392 int h;
1394 struct ip6_tnl *t; 1393 struct ip6_tnl *t;
@@ -1407,7 +1406,7 @@ static void ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
1407 unregister_netdevice_many(&list); 1406 unregister_netdevice_many(&list);
1408} 1407}
1409 1408
1410static int ip6_tnl_init_net(struct net *net) 1409static int __net_init ip6_tnl_init_net(struct net *net)
1411{ 1410{
1412 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 1411 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1413 int err; 1412 int err;
@@ -1436,7 +1435,7 @@ err_alloc_dev:
1436 return err; 1435 return err;
1437} 1436}
1438 1437
1439static void ip6_tnl_exit_net(struct net *net) 1438static void __net_exit ip6_tnl_exit_net(struct net *net)
1440{ 1439{
1441 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 1440 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1442 1441
@@ -1462,27 +1461,29 @@ static int __init ip6_tunnel_init(void)
1462{ 1461{
1463 int err; 1462 int err;
1464 1463
1465 if (xfrm6_tunnel_register(&ip4ip6_handler, AF_INET)) { 1464 err = register_pernet_device(&ip6_tnl_net_ops);
1465 if (err < 0)
1466 goto out_pernet;
1467
1468 err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
1469 if (err < 0) {
1466 printk(KERN_ERR "ip6_tunnel init: can't register ip4ip6\n"); 1470 printk(KERN_ERR "ip6_tunnel init: can't register ip4ip6\n");
1467 err = -EAGAIN; 1471 goto out_ip4ip6;
1468 goto out;
1469 } 1472 }
1470 1473
1471 if (xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6)) { 1474 err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
1475 if (err < 0) {
1472 printk(KERN_ERR "ip6_tunnel init: can't register ip6ip6\n"); 1476 printk(KERN_ERR "ip6_tunnel init: can't register ip6ip6\n");
1473 err = -EAGAIN; 1477 goto out_ip6ip6;
1474 goto unreg_ip4ip6;
1475 } 1478 }
1476 1479
1477 err = register_pernet_device(&ip6_tnl_net_ops);
1478 if (err < 0)
1479 goto err_pernet;
1480 return 0; 1480 return 0;
1481err_pernet: 1481
1482 xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6); 1482out_ip6ip6:
1483unreg_ip4ip6:
1484 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET); 1483 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
1485out: 1484out_ip4ip6:
1485 unregister_pernet_device(&ip6_tnl_net_ops);
1486out_pernet:
1486 return err; 1487 return err;
1487} 1488}
1488 1489
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 52e0f74fdfe0..27acfb58650a 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1113,6 +1113,9 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
1113 unsigned char ttls[MAXMIFS]; 1113 unsigned char ttls[MAXMIFS];
1114 int i; 1114 int i;
1115 1115
1116 if (mfc->mf6cc_parent >= MAXMIFS)
1117 return -ENFILE;
1118
1116 memset(ttls, 255, MAXMIFS); 1119 memset(ttls, 255, MAXMIFS);
1117 for (i = 0; i < MAXMIFS; i++) { 1120 for (i = 0; i < MAXMIFS; i++) {
1118 if (IF_ISSET(i, &mfc->mf6cc_ifset)) 1121 if (IF_ISSET(i, &mfc->mf6cc_ifset))
@@ -1692,17 +1695,20 @@ ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm)
1692 int ct; 1695 int ct;
1693 struct rtnexthop *nhp; 1696 struct rtnexthop *nhp;
1694 struct net *net = mfc6_net(c); 1697 struct net *net = mfc6_net(c);
1695 struct net_device *dev = net->ipv6.vif6_table[c->mf6c_parent].dev;
1696 u8 *b = skb_tail_pointer(skb); 1698 u8 *b = skb_tail_pointer(skb);
1697 struct rtattr *mp_head; 1699 struct rtattr *mp_head;
1698 1700
1699 if (dev) 1701 /* If cache is unresolved, don't try to parse IIF and OIF */
1700 RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex); 1702 if (c->mf6c_parent > MAXMIFS)
1703 return -ENOENT;
1704
1705 if (MIF_EXISTS(net, c->mf6c_parent))
1706 RTA_PUT(skb, RTA_IIF, 4, &net->ipv6.vif6_table[c->mf6c_parent].dev->ifindex);
1701 1707
1702 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); 1708 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
1703 1709
1704 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { 1710 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
1705 if (c->mfc_un.res.ttls[ct] < 255) { 1711 if (MIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) {
1706 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) 1712 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
1707 goto rtattr_failure; 1713 goto rtattr_failure;
1708 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); 1714 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 2f2a5ca2c878..85cccd6ed0b7 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -53,6 +53,7 @@
53static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 53static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
54 u8 type, u8 code, int offset, __be32 info) 54 u8 type, u8 code, int offset, __be32 info)
55{ 55{
56 struct net *net = dev_net(skb->dev);
56 __be32 spi; 57 __be32 spi;
57 struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; 58 struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
58 struct ip_comp_hdr *ipcomph = 59 struct ip_comp_hdr *ipcomph =
@@ -63,7 +64,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
63 return; 64 return;
64 65
65 spi = htonl(ntohs(ipcomph->cpi)); 66 spi = htonl(ntohs(ipcomph->cpi));
66 x = xfrm_state_lookup(&init_net, (xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6); 67 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6);
67 if (!x) 68 if (!x)
68 return; 69 return;
69 70
@@ -74,14 +75,15 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
74 75
75static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x) 76static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
76{ 77{
78 struct net *net = xs_net(x);
77 struct xfrm_state *t = NULL; 79 struct xfrm_state *t = NULL;
78 80
79 t = xfrm_state_alloc(&init_net); 81 t = xfrm_state_alloc(net);
80 if (!t) 82 if (!t)
81 goto out; 83 goto out;
82 84
83 t->id.proto = IPPROTO_IPV6; 85 t->id.proto = IPPROTO_IPV6;
84 t->id.spi = xfrm6_tunnel_alloc_spi((xfrm_address_t *)&x->props.saddr); 86 t->id.spi = xfrm6_tunnel_alloc_spi(net, (xfrm_address_t *)&x->props.saddr);
85 if (!t->id.spi) 87 if (!t->id.spi)
86 goto error; 88 goto error;
87 89
@@ -90,6 +92,7 @@ static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
90 t->props.family = AF_INET6; 92 t->props.family = AF_INET6;
91 t->props.mode = x->props.mode; 93 t->props.mode = x->props.mode;
92 memcpy(t->props.saddr.a6, x->props.saddr.a6, sizeof(struct in6_addr)); 94 memcpy(t->props.saddr.a6, x->props.saddr.a6, sizeof(struct in6_addr));
95 memcpy(&t->mark, &x->mark, sizeof(t->mark));
93 96
94 if (xfrm_init_state(t)) 97 if (xfrm_init_state(t))
95 goto error; 98 goto error;
@@ -108,13 +111,15 @@ error:
108 111
109static int ipcomp6_tunnel_attach(struct xfrm_state *x) 112static int ipcomp6_tunnel_attach(struct xfrm_state *x)
110{ 113{
114 struct net *net = xs_net(x);
111 int err = 0; 115 int err = 0;
112 struct xfrm_state *t = NULL; 116 struct xfrm_state *t = NULL;
113 __be32 spi; 117 __be32 spi;
118 u32 mark = x->mark.m & x->mark.v;
114 119
115 spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&x->props.saddr); 120 spi = xfrm6_tunnel_spi_lookup(net, (xfrm_address_t *)&x->props.saddr);
116 if (spi) 121 if (spi)
117 t = xfrm_state_lookup(&init_net, (xfrm_address_t *)&x->id.daddr, 122 t = xfrm_state_lookup(net, mark, (xfrm_address_t *)&x->id.daddr,
118 spi, IPPROTO_IPV6, AF_INET6); 123 spi, IPPROTO_IPV6, AF_INET6);
119 if (!t) { 124 if (!t) {
120 t = ipcomp6_tunnel_create(x); 125 t = ipcomp6_tunnel_create(x);
@@ -154,16 +159,12 @@ static int ipcomp6_init_state(struct xfrm_state *x)
154 if (x->props.mode == XFRM_MODE_TUNNEL) { 159 if (x->props.mode == XFRM_MODE_TUNNEL) {
155 err = ipcomp6_tunnel_attach(x); 160 err = ipcomp6_tunnel_attach(x);
156 if (err) 161 if (err)
157 goto error_tunnel; 162 goto out;
158 } 163 }
159 164
160 err = 0; 165 err = 0;
161out: 166out:
162 return err; 167 return err;
163error_tunnel:
164 ipcomp_destroy(x);
165
166 goto out;
167} 168}
168 169
169static const struct xfrm_type ipcomp6_type = 170static const struct xfrm_type ipcomp6_type =
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 1f9c44442e65..bcd971915969 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -793,10 +793,10 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
793 } 793 }
794 spin_unlock_bh(&im->mca_lock); 794 spin_unlock_bh(&im->mca_lock);
795 795
796 write_lock_bh(&idev->mc_lock); 796 spin_lock_bh(&idev->mc_lock);
797 pmc->next = idev->mc_tomb; 797 pmc->next = idev->mc_tomb;
798 idev->mc_tomb = pmc; 798 idev->mc_tomb = pmc;
799 write_unlock_bh(&idev->mc_lock); 799 spin_unlock_bh(&idev->mc_lock);
800} 800}
801 801
802static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca) 802static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca)
@@ -804,7 +804,7 @@ static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca)
804 struct ifmcaddr6 *pmc, *pmc_prev; 804 struct ifmcaddr6 *pmc, *pmc_prev;
805 struct ip6_sf_list *psf, *psf_next; 805 struct ip6_sf_list *psf, *psf_next;
806 806
807 write_lock_bh(&idev->mc_lock); 807 spin_lock_bh(&idev->mc_lock);
808 pmc_prev = NULL; 808 pmc_prev = NULL;
809 for (pmc=idev->mc_tomb; pmc; pmc=pmc->next) { 809 for (pmc=idev->mc_tomb; pmc; pmc=pmc->next) {
810 if (ipv6_addr_equal(&pmc->mca_addr, pmca)) 810 if (ipv6_addr_equal(&pmc->mca_addr, pmca))
@@ -817,7 +817,8 @@ static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca)
817 else 817 else
818 idev->mc_tomb = pmc->next; 818 idev->mc_tomb = pmc->next;
819 } 819 }
820 write_unlock_bh(&idev->mc_lock); 820 spin_unlock_bh(&idev->mc_lock);
821
821 if (pmc) { 822 if (pmc) {
822 for (psf=pmc->mca_tomb; psf; psf=psf_next) { 823 for (psf=pmc->mca_tomb; psf; psf=psf_next) {
823 psf_next = psf->sf_next; 824 psf_next = psf->sf_next;
@@ -832,10 +833,10 @@ static void mld_clear_delrec(struct inet6_dev *idev)
832{ 833{
833 struct ifmcaddr6 *pmc, *nextpmc; 834 struct ifmcaddr6 *pmc, *nextpmc;
834 835
835 write_lock_bh(&idev->mc_lock); 836 spin_lock_bh(&idev->mc_lock);
836 pmc = idev->mc_tomb; 837 pmc = idev->mc_tomb;
837 idev->mc_tomb = NULL; 838 idev->mc_tomb = NULL;
838 write_unlock_bh(&idev->mc_lock); 839 spin_unlock_bh(&idev->mc_lock);
839 840
840 for (; pmc; pmc = nextpmc) { 841 for (; pmc; pmc = nextpmc) {
841 nextpmc = pmc->next; 842 nextpmc = pmc->next;
@@ -1696,7 +1697,7 @@ static void mld_send_cr(struct inet6_dev *idev)
1696 int type, dtype; 1697 int type, dtype;
1697 1698
1698 read_lock_bh(&idev->lock); 1699 read_lock_bh(&idev->lock);
1699 write_lock_bh(&idev->mc_lock); 1700 spin_lock(&idev->mc_lock);
1700 1701
1701 /* deleted MCA's */ 1702 /* deleted MCA's */
1702 pmc_prev = NULL; 1703 pmc_prev = NULL;
@@ -1730,7 +1731,7 @@ static void mld_send_cr(struct inet6_dev *idev)
1730 } else 1731 } else
1731 pmc_prev = pmc; 1732 pmc_prev = pmc;
1732 } 1733 }
1733 write_unlock_bh(&idev->mc_lock); 1734 spin_unlock(&idev->mc_lock);
1734 1735
1735 /* change recs */ 1736 /* change recs */
1736 for (pmc=idev->mc_list; pmc; pmc=pmc->next) { 1737 for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
@@ -2311,7 +2312,7 @@ void ipv6_mc_up(struct inet6_dev *idev)
2311void ipv6_mc_init_dev(struct inet6_dev *idev) 2312void ipv6_mc_init_dev(struct inet6_dev *idev)
2312{ 2313{
2313 write_lock_bh(&idev->lock); 2314 write_lock_bh(&idev->lock);
2314 rwlock_init(&idev->mc_lock); 2315 spin_lock_init(&idev->mc_lock);
2315 idev->mc_gq_running = 0; 2316 idev->mc_gq_running = 0;
2316 setup_timer(&idev->mc_gq_timer, mld_gq_timer_expire, 2317 setup_timer(&idev->mc_gq_timer, mld_gq_timer_expire,
2317 (unsigned long)idev); 2318 (unsigned long)idev);
@@ -2646,7 +2647,7 @@ static const struct file_operations igmp6_mcf_seq_fops = {
2646 .release = seq_release_net, 2647 .release = seq_release_net,
2647}; 2648};
2648 2649
2649static int igmp6_proc_init(struct net *net) 2650static int __net_init igmp6_proc_init(struct net *net)
2650{ 2651{
2651 int err; 2652 int err;
2652 2653
@@ -2666,23 +2667,22 @@ out_proc_net_igmp6:
2666 goto out; 2667 goto out;
2667} 2668}
2668 2669
2669static void igmp6_proc_exit(struct net *net) 2670static void __net_exit igmp6_proc_exit(struct net *net)
2670{ 2671{
2671 proc_net_remove(net, "mcfilter6"); 2672 proc_net_remove(net, "mcfilter6");
2672 proc_net_remove(net, "igmp6"); 2673 proc_net_remove(net, "igmp6");
2673} 2674}
2674#else 2675#else
2675static int igmp6_proc_init(struct net *net) 2676static inline int igmp6_proc_init(struct net *net)
2676{ 2677{
2677 return 0; 2678 return 0;
2678} 2679}
2679static void igmp6_proc_exit(struct net *net) 2680static inline void igmp6_proc_exit(struct net *net)
2680{ 2681{
2681 ;
2682} 2682}
2683#endif 2683#endif
2684 2684
2685static int igmp6_net_init(struct net *net) 2685static int __net_init igmp6_net_init(struct net *net)
2686{ 2686{
2687 int err; 2687 int err;
2688 2688
@@ -2708,7 +2708,7 @@ out_sock_create:
2708 goto out; 2708 goto out;
2709} 2709}
2710 2710
2711static void igmp6_net_exit(struct net *net) 2711static void __net_exit igmp6_net_exit(struct net *net)
2712{ 2712{
2713 inet_ctl_sock_destroy(net->ipv6.igmp_sk); 2713 inet_ctl_sock_destroy(net->ipv6.igmp_sk);
2714 igmp6_proc_exit(net); 2714 igmp6_proc_exit(net);
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index f797e8c6f3b3..2794b6002836 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -56,7 +56,7 @@ static inline void *mip6_padn(__u8 *data, __u8 padlen)
56 56
57static inline void mip6_param_prob(struct sk_buff *skb, u8 code, int pos) 57static inline void mip6_param_prob(struct sk_buff *skb, u8 code, int pos)
58{ 58{
59 icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev); 59 icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos);
60} 60}
61 61
62static int mip6_mh_len(int type) 62static int mip6_mh_len(int type)
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index c45852798092..8bcc4b7db3bf 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1772,7 +1772,7 @@ int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, void __user *bu
1772 1772
1773#endif 1773#endif
1774 1774
1775static int ndisc_net_init(struct net *net) 1775static int __net_init ndisc_net_init(struct net *net)
1776{ 1776{
1777 struct ipv6_pinfo *np; 1777 struct ipv6_pinfo *np;
1778 struct sock *sk; 1778 struct sock *sk;
@@ -1797,7 +1797,7 @@ static int ndisc_net_init(struct net *net)
1797 return 0; 1797 return 0;
1798} 1798}
1799 1799
1800static void ndisc_net_exit(struct net *net) 1800static void __net_exit ndisc_net_exit(struct net *net)
1801{ 1801{
1802 inet_ctl_sock_destroy(net->ipv6.ndisc_sk); 1802 inet_ctl_sock_destroy(net->ipv6.ndisc_sk);
1803} 1803}
@@ -1820,8 +1820,7 @@ int __init ndisc_init(void)
1820 neigh_table_init(&nd_tbl); 1820 neigh_table_init(&nd_tbl);
1821 1821
1822#ifdef CONFIG_SYSCTL 1822#ifdef CONFIG_SYSCTL
1823 err = neigh_sysctl_register(NULL, &nd_tbl.parms, NET_IPV6, 1823 err = neigh_sysctl_register(NULL, &nd_tbl.parms, "ipv6",
1824 NET_IPV6_NEIGH, "ipv6",
1825 &ndisc_ifinfo_sysctl_change); 1824 &ndisc_ifinfo_sysctl_change);
1826 if (err) 1825 if (err)
1827 goto out_unregister_pernet; 1826 goto out_unregister_pernet;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 480d7f8c9802..9210e312edf1 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -29,6 +29,7 @@
29#include <linux/netfilter_ipv6/ip6_tables.h> 29#include <linux/netfilter_ipv6/ip6_tables.h>
30#include <linux/netfilter/x_tables.h> 30#include <linux/netfilter/x_tables.h>
31#include <net/netfilter/nf_log.h> 31#include <net/netfilter/nf_log.h>
32#include "../../netfilter/xt_repldata.h"
32 33
33MODULE_LICENSE("GPL"); 34MODULE_LICENSE("GPL");
34MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 35MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -67,6 +68,12 @@ do { \
67#define inline 68#define inline
68#endif 69#endif
69 70
71void *ip6t_alloc_initial_table(const struct xt_table *info)
72{
73 return xt_alloc_initial_table(ip6t, IP6T);
74}
75EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
76
70/* 77/*
71 We keep a set of rules for each CPU, so we can avoid write-locking 78 We keep a set of rules for each CPU, so we can avoid write-locking
72 them in the softirq when updating the counters and therefore 79 them in the softirq when updating the counters and therefore
@@ -201,7 +208,7 @@ ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
201 208
202/* Performance critical - called for every packet */ 209/* Performance critical - called for every packet */
203static inline bool 210static inline bool
204do_match(struct ip6t_entry_match *m, const struct sk_buff *skb, 211do_match(const struct ip6t_entry_match *m, const struct sk_buff *skb,
205 struct xt_match_param *par) 212 struct xt_match_param *par)
206{ 213{
207 par->match = m->u.kernel.match; 214 par->match = m->u.kernel.match;
@@ -215,7 +222,7 @@ do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
215} 222}
216 223
217static inline struct ip6t_entry * 224static inline struct ip6t_entry *
218get_entry(void *base, unsigned int offset) 225get_entry(const void *base, unsigned int offset)
219{ 226{
220 return (struct ip6t_entry *)(base + offset); 227 return (struct ip6t_entry *)(base + offset);
221} 228}
@@ -229,6 +236,12 @@ static inline bool unconditional(const struct ip6t_ip6 *ipv6)
229 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0; 236 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
230} 237}
231 238
239static inline const struct ip6t_entry_target *
240ip6t_get_target_c(const struct ip6t_entry *e)
241{
242 return ip6t_get_target((struct ip6t_entry *)e);
243}
244
232#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 245#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
233 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) 246 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
234/* This cries for unification! */ 247/* This cries for unification! */
@@ -264,11 +277,11 @@ static struct nf_loginfo trace_loginfo = {
264 277
265/* Mildly perf critical (only if packet tracing is on) */ 278/* Mildly perf critical (only if packet tracing is on) */
266static inline int 279static inline int
267get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e, 280get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
268 const char *hookname, const char **chainname, 281 const char *hookname, const char **chainname,
269 const char **comment, unsigned int *rulenum) 282 const char **comment, unsigned int *rulenum)
270{ 283{
271 struct ip6t_standard_target *t = (void *)ip6t_get_target(s); 284 const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);
272 285
273 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) { 286 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
274 /* Head of user chain: ERROR target with chainname */ 287 /* Head of user chain: ERROR target with chainname */
@@ -294,17 +307,18 @@ get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
294 return 0; 307 return 0;
295} 308}
296 309
297static void trace_packet(struct sk_buff *skb, 310static void trace_packet(const struct sk_buff *skb,
298 unsigned int hook, 311 unsigned int hook,
299 const struct net_device *in, 312 const struct net_device *in,
300 const struct net_device *out, 313 const struct net_device *out,
301 const char *tablename, 314 const char *tablename,
302 struct xt_table_info *private, 315 const struct xt_table_info *private,
303 struct ip6t_entry *e) 316 const struct ip6t_entry *e)
304{ 317{
305 void *table_base; 318 const void *table_base;
306 const struct ip6t_entry *root; 319 const struct ip6t_entry *root;
307 const char *hookname, *chainname, *comment; 320 const char *hookname, *chainname, *comment;
321 const struct ip6t_entry *iter;
308 unsigned int rulenum = 0; 322 unsigned int rulenum = 0;
309 323
310 table_base = private->entries[smp_processor_id()]; 324 table_base = private->entries[smp_processor_id()];
@@ -313,10 +327,10 @@ static void trace_packet(struct sk_buff *skb,
313 hookname = chainname = hooknames[hook]; 327 hookname = chainname = hooknames[hook];
314 comment = comments[NF_IP6_TRACE_COMMENT_RULE]; 328 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
315 329
316 IP6T_ENTRY_ITERATE(root, 330 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
317 private->size - private->hook_entry[hook], 331 if (get_chainname_rulenum(iter, e, hookname,
318 get_chainname_rulenum, 332 &chainname, &comment, &rulenum) != 0)
319 e, hookname, &chainname, &comment, &rulenum); 333 break;
320 334
321 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo, 335 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
322 "TRACE: %s:%s:%s:%u ", 336 "TRACE: %s:%s:%s:%u ",
@@ -345,9 +359,9 @@ ip6t_do_table(struct sk_buff *skb,
345 /* Initializing verdict to NF_DROP keeps gcc happy. */ 359 /* Initializing verdict to NF_DROP keeps gcc happy. */
346 unsigned int verdict = NF_DROP; 360 unsigned int verdict = NF_DROP;
347 const char *indev, *outdev; 361 const char *indev, *outdev;
348 void *table_base; 362 const void *table_base;
349 struct ip6t_entry *e, *back; 363 struct ip6t_entry *e, *back;
350 struct xt_table_info *private; 364 const struct xt_table_info *private;
351 struct xt_match_param mtpar; 365 struct xt_match_param mtpar;
352 struct xt_target_param tgpar; 366 struct xt_target_param tgpar;
353 367
@@ -378,22 +392,27 @@ ip6t_do_table(struct sk_buff *skb,
378 back = get_entry(table_base, private->underflow[hook]); 392 back = get_entry(table_base, private->underflow[hook]);
379 393
380 do { 394 do {
381 struct ip6t_entry_target *t; 395 const struct ip6t_entry_target *t;
396 const struct xt_entry_match *ematch;
382 397
383 IP_NF_ASSERT(e); 398 IP_NF_ASSERT(e);
384 IP_NF_ASSERT(back); 399 IP_NF_ASSERT(back);
385 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6, 400 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
386 &mtpar.thoff, &mtpar.fragoff, &hotdrop) || 401 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
387 IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) { 402 no_match:
388 e = ip6t_next_entry(e); 403 e = ip6t_next_entry(e);
389 continue; 404 continue;
390 } 405 }
391 406
407 xt_ematch_foreach(ematch, e)
408 if (do_match(ematch, skb, &mtpar) != 0)
409 goto no_match;
410
392 ADD_COUNTER(e->counters, 411 ADD_COUNTER(e->counters,
393 ntohs(ipv6_hdr(skb)->payload_len) + 412 ntohs(ipv6_hdr(skb)->payload_len) +
394 sizeof(struct ipv6hdr), 1); 413 sizeof(struct ipv6hdr), 1);
395 414
396 t = ip6t_get_target(e); 415 t = ip6t_get_target_c(e);
397 IP_NF_ASSERT(t->u.kernel.target); 416 IP_NF_ASSERT(t->u.kernel.target);
398 417
399#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 418#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
@@ -475,7 +494,7 @@ ip6t_do_table(struct sk_buff *skb,
475/* Figures out from what hook each rule can be called: returns 0 if 494/* Figures out from what hook each rule can be called: returns 0 if
476 there are loops. Puts hook bitmask in comefrom. */ 495 there are loops. Puts hook bitmask in comefrom. */
477static int 496static int
478mark_source_chains(struct xt_table_info *newinfo, 497mark_source_chains(const struct xt_table_info *newinfo,
479 unsigned int valid_hooks, void *entry0) 498 unsigned int valid_hooks, void *entry0)
480{ 499{
481 unsigned int hook; 500 unsigned int hook;
@@ -493,8 +512,8 @@ mark_source_chains(struct xt_table_info *newinfo,
493 e->counters.pcnt = pos; 512 e->counters.pcnt = pos;
494 513
495 for (;;) { 514 for (;;) {
496 struct ip6t_standard_target *t 515 const struct ip6t_standard_target *t
497 = (void *)ip6t_get_target(e); 516 = (void *)ip6t_get_target_c(e);
498 int visited = e->comefrom & (1 << hook); 517 int visited = e->comefrom & (1 << hook);
499 518
500 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { 519 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
@@ -584,27 +603,23 @@ mark_source_chains(struct xt_table_info *newinfo,
584 return 1; 603 return 1;
585} 604}
586 605
587static int 606static void cleanup_match(struct ip6t_entry_match *m, struct net *net)
588cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
589{ 607{
590 struct xt_mtdtor_param par; 608 struct xt_mtdtor_param par;
591 609
592 if (i && (*i)-- == 0) 610 par.net = net;
593 return 1;
594
595 par.match = m->u.kernel.match; 611 par.match = m->u.kernel.match;
596 par.matchinfo = m->data; 612 par.matchinfo = m->data;
597 par.family = NFPROTO_IPV6; 613 par.family = NFPROTO_IPV6;
598 if (par.match->destroy != NULL) 614 if (par.match->destroy != NULL)
599 par.match->destroy(&par); 615 par.match->destroy(&par);
600 module_put(par.match->me); 616 module_put(par.match->me);
601 return 0;
602} 617}
603 618
604static int 619static int
605check_entry(struct ip6t_entry *e, const char *name) 620check_entry(const struct ip6t_entry *e, const char *name)
606{ 621{
607 struct ip6t_entry_target *t; 622 const struct ip6t_entry_target *t;
608 623
609 if (!ip6_checkentry(&e->ipv6)) { 624 if (!ip6_checkentry(&e->ipv6)) {
610 duprintf("ip_tables: ip check failed %p %s.\n", e, name); 625 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
@@ -615,15 +630,14 @@ check_entry(struct ip6t_entry *e, const char *name)
615 e->next_offset) 630 e->next_offset)
616 return -EINVAL; 631 return -EINVAL;
617 632
618 t = ip6t_get_target(e); 633 t = ip6t_get_target_c(e);
619 if (e->target_offset + t->u.target_size > e->next_offset) 634 if (e->target_offset + t->u.target_size > e->next_offset)
620 return -EINVAL; 635 return -EINVAL;
621 636
622 return 0; 637 return 0;
623} 638}
624 639
625static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par, 640static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
626 unsigned int *i)
627{ 641{
628 const struct ip6t_ip6 *ipv6 = par->entryinfo; 642 const struct ip6t_ip6 *ipv6 = par->entryinfo;
629 int ret; 643 int ret;
@@ -638,13 +652,11 @@ static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
638 par.match->name); 652 par.match->name);
639 return ret; 653 return ret;
640 } 654 }
641 ++*i;
642 return 0; 655 return 0;
643} 656}
644 657
645static int 658static int
646find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par, 659find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
647 unsigned int *i)
648{ 660{
649 struct xt_match *match; 661 struct xt_match *match;
650 int ret; 662 int ret;
@@ -658,7 +670,7 @@ find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
658 } 670 }
659 m->u.kernel.match = match; 671 m->u.kernel.match = match;
660 672
661 ret = check_match(m, par, i); 673 ret = check_match(m, par);
662 if (ret) 674 if (ret)
663 goto err; 675 goto err;
664 676
@@ -668,10 +680,11 @@ err:
668 return ret; 680 return ret;
669} 681}
670 682
671static int check_target(struct ip6t_entry *e, const char *name) 683static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
672{ 684{
673 struct ip6t_entry_target *t = ip6t_get_target(e); 685 struct ip6t_entry_target *t = ip6t_get_target(e);
674 struct xt_tgchk_param par = { 686 struct xt_tgchk_param par = {
687 .net = net,
675 .table = name, 688 .table = name,
676 .entryinfo = e, 689 .entryinfo = e,
677 .target = t->u.kernel.target, 690 .target = t->u.kernel.target,
@@ -693,27 +706,32 @@ static int check_target(struct ip6t_entry *e, const char *name)
693} 706}
694 707
695static int 708static int
696find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size, 709find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
697 unsigned int *i) 710 unsigned int size)
698{ 711{
699 struct ip6t_entry_target *t; 712 struct ip6t_entry_target *t;
700 struct xt_target *target; 713 struct xt_target *target;
701 int ret; 714 int ret;
702 unsigned int j; 715 unsigned int j;
703 struct xt_mtchk_param mtpar; 716 struct xt_mtchk_param mtpar;
717 struct xt_entry_match *ematch;
704 718
705 ret = check_entry(e, name); 719 ret = check_entry(e, name);
706 if (ret) 720 if (ret)
707 return ret; 721 return ret;
708 722
709 j = 0; 723 j = 0;
724 mtpar.net = net;
710 mtpar.table = name; 725 mtpar.table = name;
711 mtpar.entryinfo = &e->ipv6; 726 mtpar.entryinfo = &e->ipv6;
712 mtpar.hook_mask = e->comefrom; 727 mtpar.hook_mask = e->comefrom;
713 mtpar.family = NFPROTO_IPV6; 728 mtpar.family = NFPROTO_IPV6;
714 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j); 729 xt_ematch_foreach(ematch, e) {
715 if (ret != 0) 730 ret = find_check_match(ematch, &mtpar);
716 goto cleanup_matches; 731 if (ret != 0)
732 goto cleanup_matches;
733 ++j;
734 }
717 735
718 t = ip6t_get_target(e); 736 t = ip6t_get_target(e);
719 target = try_then_request_module(xt_find_target(AF_INET6, 737 target = try_then_request_module(xt_find_target(AF_INET6,
@@ -727,27 +745,29 @@ find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
727 } 745 }
728 t->u.kernel.target = target; 746 t->u.kernel.target = target;
729 747
730 ret = check_target(e, name); 748 ret = check_target(e, net, name);
731 if (ret) 749 if (ret)
732 goto err; 750 goto err;
733
734 (*i)++;
735 return 0; 751 return 0;
736 err: 752 err:
737 module_put(t->u.kernel.target->me); 753 module_put(t->u.kernel.target->me);
738 cleanup_matches: 754 cleanup_matches:
739 IP6T_MATCH_ITERATE(e, cleanup_match, &j); 755 xt_ematch_foreach(ematch, e) {
756 if (j-- == 0)
757 break;
758 cleanup_match(ematch, net);
759 }
740 return ret; 760 return ret;
741} 761}
742 762
743static bool check_underflow(struct ip6t_entry *e) 763static bool check_underflow(const struct ip6t_entry *e)
744{ 764{
745 const struct ip6t_entry_target *t; 765 const struct ip6t_entry_target *t;
746 unsigned int verdict; 766 unsigned int verdict;
747 767
748 if (!unconditional(&e->ipv6)) 768 if (!unconditional(&e->ipv6))
749 return false; 769 return false;
750 t = ip6t_get_target(e); 770 t = ip6t_get_target_c(e);
751 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) 771 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
752 return false; 772 return false;
753 verdict = ((struct ip6t_standard_target *)t)->verdict; 773 verdict = ((struct ip6t_standard_target *)t)->verdict;
@@ -758,12 +778,11 @@ static bool check_underflow(struct ip6t_entry *e)
758static int 778static int
759check_entry_size_and_hooks(struct ip6t_entry *e, 779check_entry_size_and_hooks(struct ip6t_entry *e,
760 struct xt_table_info *newinfo, 780 struct xt_table_info *newinfo,
761 unsigned char *base, 781 const unsigned char *base,
762 unsigned char *limit, 782 const unsigned char *limit,
763 const unsigned int *hook_entries, 783 const unsigned int *hook_entries,
764 const unsigned int *underflows, 784 const unsigned int *underflows,
765 unsigned int valid_hooks, 785 unsigned int valid_hooks)
766 unsigned int *i)
767{ 786{
768 unsigned int h; 787 unsigned int h;
769 788
@@ -800,50 +819,41 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
800 /* Clear counters and comefrom */ 819 /* Clear counters and comefrom */
801 e->counters = ((struct xt_counters) { 0, 0 }); 820 e->counters = ((struct xt_counters) { 0, 0 });
802 e->comefrom = 0; 821 e->comefrom = 0;
803
804 (*i)++;
805 return 0; 822 return 0;
806} 823}
807 824
808static int 825static void cleanup_entry(struct ip6t_entry *e, struct net *net)
809cleanup_entry(struct ip6t_entry *e, unsigned int *i)
810{ 826{
811 struct xt_tgdtor_param par; 827 struct xt_tgdtor_param par;
812 struct ip6t_entry_target *t; 828 struct ip6t_entry_target *t;
813 829 struct xt_entry_match *ematch;
814 if (i && (*i)-- == 0)
815 return 1;
816 830
817 /* Cleanup all matches */ 831 /* Cleanup all matches */
818 IP6T_MATCH_ITERATE(e, cleanup_match, NULL); 832 xt_ematch_foreach(ematch, e)
833 cleanup_match(ematch, net);
819 t = ip6t_get_target(e); 834 t = ip6t_get_target(e);
820 835
836 par.net = net;
821 par.target = t->u.kernel.target; 837 par.target = t->u.kernel.target;
822 par.targinfo = t->data; 838 par.targinfo = t->data;
823 par.family = NFPROTO_IPV6; 839 par.family = NFPROTO_IPV6;
824 if (par.target->destroy != NULL) 840 if (par.target->destroy != NULL)
825 par.target->destroy(&par); 841 par.target->destroy(&par);
826 module_put(par.target->me); 842 module_put(par.target->me);
827 return 0;
828} 843}
829 844
830/* Checks and translates the user-supplied table segment (held in 845/* Checks and translates the user-supplied table segment (held in
831 newinfo) */ 846 newinfo) */
832static int 847static int
833translate_table(const char *name, 848translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
834 unsigned int valid_hooks, 849 const struct ip6t_replace *repl)
835 struct xt_table_info *newinfo,
836 void *entry0,
837 unsigned int size,
838 unsigned int number,
839 const unsigned int *hook_entries,
840 const unsigned int *underflows)
841{ 850{
851 struct ip6t_entry *iter;
842 unsigned int i; 852 unsigned int i;
843 int ret; 853 int ret = 0;
844 854
845 newinfo->size = size; 855 newinfo->size = repl->size;
846 newinfo->number = number; 856 newinfo->number = repl->num_entries;
847 857
848 /* Init all hooks to impossible value. */ 858 /* Init all hooks to impossible value. */
849 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 859 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
@@ -854,49 +864,58 @@ translate_table(const char *name,
854 duprintf("translate_table: size %u\n", newinfo->size); 864 duprintf("translate_table: size %u\n", newinfo->size);
855 i = 0; 865 i = 0;
856 /* Walk through entries, checking offsets. */ 866 /* Walk through entries, checking offsets. */
857 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size, 867 xt_entry_foreach(iter, entry0, newinfo->size) {
858 check_entry_size_and_hooks, 868 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
859 newinfo, 869 entry0 + repl->size,
860 entry0, 870 repl->hook_entry,
861 entry0 + size, 871 repl->underflow,
862 hook_entries, underflows, valid_hooks, &i); 872 repl->valid_hooks);
863 if (ret != 0) 873 if (ret != 0)
864 return ret; 874 return ret;
875 ++i;
876 }
865 877
866 if (i != number) { 878 if (i != repl->num_entries) {
867 duprintf("translate_table: %u not %u entries\n", 879 duprintf("translate_table: %u not %u entries\n",
868 i, number); 880 i, repl->num_entries);
869 return -EINVAL; 881 return -EINVAL;
870 } 882 }
871 883
872 /* Check hooks all assigned */ 884 /* Check hooks all assigned */
873 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 885 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
874 /* Only hooks which are valid */ 886 /* Only hooks which are valid */
875 if (!(valid_hooks & (1 << i))) 887 if (!(repl->valid_hooks & (1 << i)))
876 continue; 888 continue;
877 if (newinfo->hook_entry[i] == 0xFFFFFFFF) { 889 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
878 duprintf("Invalid hook entry %u %u\n", 890 duprintf("Invalid hook entry %u %u\n",
879 i, hook_entries[i]); 891 i, repl->hook_entry[i]);
880 return -EINVAL; 892 return -EINVAL;
881 } 893 }
882 if (newinfo->underflow[i] == 0xFFFFFFFF) { 894 if (newinfo->underflow[i] == 0xFFFFFFFF) {
883 duprintf("Invalid underflow %u %u\n", 895 duprintf("Invalid underflow %u %u\n",
884 i, underflows[i]); 896 i, repl->underflow[i]);
885 return -EINVAL; 897 return -EINVAL;
886 } 898 }
887 } 899 }
888 900
889 if (!mark_source_chains(newinfo, valid_hooks, entry0)) 901 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
890 return -ELOOP; 902 return -ELOOP;
891 903
892 /* Finally, each sanity check must pass */ 904 /* Finally, each sanity check must pass */
893 i = 0; 905 i = 0;
894 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size, 906 xt_entry_foreach(iter, entry0, newinfo->size) {
895 find_check_entry, name, size, &i); 907 ret = find_check_entry(iter, net, repl->name, repl->size);
908 if (ret != 0)
909 break;
910 ++i;
911 }
896 912
897 if (ret != 0) { 913 if (ret != 0) {
898 IP6T_ENTRY_ITERATE(entry0, newinfo->size, 914 xt_entry_foreach(iter, entry0, newinfo->size) {
899 cleanup_entry, &i); 915 if (i-- == 0)
916 break;
917 cleanup_entry(iter, net);
918 }
900 return ret; 919 return ret;
901 } 920 }
902 921
@@ -909,33 +928,11 @@ translate_table(const char *name,
909 return ret; 928 return ret;
910} 929}
911 930
912/* Gets counters. */
913static inline int
914add_entry_to_counter(const struct ip6t_entry *e,
915 struct xt_counters total[],
916 unsigned int *i)
917{
918 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
919
920 (*i)++;
921 return 0;
922}
923
924static inline int
925set_entry_to_counter(const struct ip6t_entry *e,
926 struct ip6t_counters total[],
927 unsigned int *i)
928{
929 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
930
931 (*i)++;
932 return 0;
933}
934
935static void 931static void
936get_counters(const struct xt_table_info *t, 932get_counters(const struct xt_table_info *t,
937 struct xt_counters counters[]) 933 struct xt_counters counters[])
938{ 934{
935 struct ip6t_entry *iter;
939 unsigned int cpu; 936 unsigned int cpu;
940 unsigned int i; 937 unsigned int i;
941 unsigned int curcpu; 938 unsigned int curcpu;
@@ -951,32 +948,32 @@ get_counters(const struct xt_table_info *t,
951 curcpu = smp_processor_id(); 948 curcpu = smp_processor_id();
952 949
953 i = 0; 950 i = 0;
954 IP6T_ENTRY_ITERATE(t->entries[curcpu], 951 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
955 t->size, 952 SET_COUNTER(counters[i], iter->counters.bcnt,
956 set_entry_to_counter, 953 iter->counters.pcnt);
957 counters, 954 ++i;
958 &i); 955 }
959 956
960 for_each_possible_cpu(cpu) { 957 for_each_possible_cpu(cpu) {
961 if (cpu == curcpu) 958 if (cpu == curcpu)
962 continue; 959 continue;
963 i = 0; 960 i = 0;
964 xt_info_wrlock(cpu); 961 xt_info_wrlock(cpu);
965 IP6T_ENTRY_ITERATE(t->entries[cpu], 962 xt_entry_foreach(iter, t->entries[cpu], t->size) {
966 t->size, 963 ADD_COUNTER(counters[i], iter->counters.bcnt,
967 add_entry_to_counter, 964 iter->counters.pcnt);
968 counters, 965 ++i;
969 &i); 966 }
970 xt_info_wrunlock(cpu); 967 xt_info_wrunlock(cpu);
971 } 968 }
972 local_bh_enable(); 969 local_bh_enable();
973} 970}
974 971
975static struct xt_counters *alloc_counters(struct xt_table *table) 972static struct xt_counters *alloc_counters(const struct xt_table *table)
976{ 973{
977 unsigned int countersize; 974 unsigned int countersize;
978 struct xt_counters *counters; 975 struct xt_counters *counters;
979 struct xt_table_info *private = table->private; 976 const struct xt_table_info *private = table->private;
980 977
981 /* We need atomic snapshot of counters: rest doesn't change 978 /* We need atomic snapshot of counters: rest doesn't change
982 (other than comefrom, which userspace doesn't care 979 (other than comefrom, which userspace doesn't care
@@ -994,11 +991,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
994 991
995static int 992static int
996copy_entries_to_user(unsigned int total_size, 993copy_entries_to_user(unsigned int total_size,
997 struct xt_table *table, 994 const struct xt_table *table,
998 void __user *userptr) 995 void __user *userptr)
999{ 996{
1000 unsigned int off, num; 997 unsigned int off, num;
1001 struct ip6t_entry *e; 998 const struct ip6t_entry *e;
1002 struct xt_counters *counters; 999 struct xt_counters *counters;
1003 const struct xt_table_info *private = table->private; 1000 const struct xt_table_info *private = table->private;
1004 int ret = 0; 1001 int ret = 0;
@@ -1050,7 +1047,7 @@ copy_entries_to_user(unsigned int total_size,
1050 } 1047 }
1051 } 1048 }
1052 1049
1053 t = ip6t_get_target(e); 1050 t = ip6t_get_target_c(e);
1054 if (copy_to_user(userptr + off + e->target_offset 1051 if (copy_to_user(userptr + off + e->target_offset
1055 + offsetof(struct ip6t_entry_target, 1052 + offsetof(struct ip6t_entry_target,
1056 u.user.name), 1053 u.user.name),
@@ -1067,7 +1064,7 @@ copy_entries_to_user(unsigned int total_size,
1067} 1064}
1068 1065
1069#ifdef CONFIG_COMPAT 1066#ifdef CONFIG_COMPAT
1070static void compat_standard_from_user(void *dst, void *src) 1067static void compat_standard_from_user(void *dst, const void *src)
1071{ 1068{
1072 int v = *(compat_int_t *)src; 1069 int v = *(compat_int_t *)src;
1073 1070
@@ -1076,7 +1073,7 @@ static void compat_standard_from_user(void *dst, void *src)
1076 memcpy(dst, &v, sizeof(v)); 1073 memcpy(dst, &v, sizeof(v));
1077} 1074}
1078 1075
1079static int compat_standard_to_user(void __user *dst, void *src) 1076static int compat_standard_to_user(void __user *dst, const void *src)
1080{ 1077{
1081 compat_int_t cv = *(int *)src; 1078 compat_int_t cv = *(int *)src;
1082 1079
@@ -1085,25 +1082,20 @@ static int compat_standard_to_user(void __user *dst, void *src)
1085 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; 1082 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1086} 1083}
1087 1084
1088static inline int 1085static int compat_calc_entry(const struct ip6t_entry *e,
1089compat_calc_match(struct ip6t_entry_match *m, int *size)
1090{
1091 *size += xt_compat_match_offset(m->u.kernel.match);
1092 return 0;
1093}
1094
1095static int compat_calc_entry(struct ip6t_entry *e,
1096 const struct xt_table_info *info, 1086 const struct xt_table_info *info,
1097 void *base, struct xt_table_info *newinfo) 1087 const void *base, struct xt_table_info *newinfo)
1098{ 1088{
1099 struct ip6t_entry_target *t; 1089 const struct xt_entry_match *ematch;
1090 const struct ip6t_entry_target *t;
1100 unsigned int entry_offset; 1091 unsigned int entry_offset;
1101 int off, i, ret; 1092 int off, i, ret;
1102 1093
1103 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); 1094 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1104 entry_offset = (void *)e - base; 1095 entry_offset = (void *)e - base;
1105 IP6T_MATCH_ITERATE(e, compat_calc_match, &off); 1096 xt_ematch_foreach(ematch, e)
1106 t = ip6t_get_target(e); 1097 off += xt_compat_match_offset(ematch->u.kernel.match);
1098 t = ip6t_get_target_c(e);
1107 off += xt_compat_target_offset(t->u.kernel.target); 1099 off += xt_compat_target_offset(t->u.kernel.target);
1108 newinfo->size -= off; 1100 newinfo->size -= off;
1109 ret = xt_compat_add_offset(AF_INET6, entry_offset, off); 1101 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
@@ -1124,7 +1116,9 @@ static int compat_calc_entry(struct ip6t_entry *e,
1124static int compat_table_info(const struct xt_table_info *info, 1116static int compat_table_info(const struct xt_table_info *info,
1125 struct xt_table_info *newinfo) 1117 struct xt_table_info *newinfo)
1126{ 1118{
1119 struct ip6t_entry *iter;
1127 void *loc_cpu_entry; 1120 void *loc_cpu_entry;
1121 int ret;
1128 1122
1129 if (!newinfo || !info) 1123 if (!newinfo || !info)
1130 return -EINVAL; 1124 return -EINVAL;
@@ -1133,13 +1127,17 @@ static int compat_table_info(const struct xt_table_info *info,
1133 memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); 1127 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1134 newinfo->initial_entries = 0; 1128 newinfo->initial_entries = 0;
1135 loc_cpu_entry = info->entries[raw_smp_processor_id()]; 1129 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1136 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size, 1130 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1137 compat_calc_entry, info, loc_cpu_entry, 1131 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1138 newinfo); 1132 if (ret != 0)
1133 return ret;
1134 }
1135 return 0;
1139} 1136}
1140#endif 1137#endif
1141 1138
1142static int get_info(struct net *net, void __user *user, int *len, int compat) 1139static int get_info(struct net *net, void __user *user,
1140 const int *len, int compat)
1143{ 1141{
1144 char name[IP6T_TABLE_MAXNAMELEN]; 1142 char name[IP6T_TABLE_MAXNAMELEN];
1145 struct xt_table *t; 1143 struct xt_table *t;
@@ -1164,10 +1162,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
1164 if (t && !IS_ERR(t)) { 1162 if (t && !IS_ERR(t)) {
1165 struct ip6t_getinfo info; 1163 struct ip6t_getinfo info;
1166 const struct xt_table_info *private = t->private; 1164 const struct xt_table_info *private = t->private;
1167
1168#ifdef CONFIG_COMPAT 1165#ifdef CONFIG_COMPAT
1166 struct xt_table_info tmp;
1167
1169 if (compat) { 1168 if (compat) {
1170 struct xt_table_info tmp;
1171 ret = compat_table_info(private, &tmp); 1169 ret = compat_table_info(private, &tmp);
1172 xt_compat_flush_offsets(AF_INET6); 1170 xt_compat_flush_offsets(AF_INET6);
1173 private = &tmp; 1171 private = &tmp;
@@ -1199,7 +1197,8 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
1199} 1197}
1200 1198
1201static int 1199static int
1202get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len) 1200get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1201 const int *len)
1203{ 1202{
1204 int ret; 1203 int ret;
1205 struct ip6t_get_entries get; 1204 struct ip6t_get_entries get;
@@ -1247,6 +1246,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1247 struct xt_table_info *oldinfo; 1246 struct xt_table_info *oldinfo;
1248 struct xt_counters *counters; 1247 struct xt_counters *counters;
1249 const void *loc_cpu_old_entry; 1248 const void *loc_cpu_old_entry;
1249 struct ip6t_entry *iter;
1250 1250
1251 ret = 0; 1251 ret = 0;
1252 counters = vmalloc_node(num_counters * sizeof(struct xt_counters), 1252 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
@@ -1290,8 +1290,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1290 1290
1291 /* Decrease module usage counts and free resource */ 1291 /* Decrease module usage counts and free resource */
1292 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; 1292 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1293 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, 1293 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1294 NULL); 1294 cleanup_entry(iter, net);
1295
1295 xt_free_table_info(oldinfo); 1296 xt_free_table_info(oldinfo);
1296 if (copy_to_user(counters_ptr, counters, 1297 if (copy_to_user(counters_ptr, counters,
1297 sizeof(struct xt_counters) * num_counters) != 0) 1298 sizeof(struct xt_counters) * num_counters) != 0)
@@ -1310,12 +1311,13 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1310} 1311}
1311 1312
1312static int 1313static int
1313do_replace(struct net *net, void __user *user, unsigned int len) 1314do_replace(struct net *net, const void __user *user, unsigned int len)
1314{ 1315{
1315 int ret; 1316 int ret;
1316 struct ip6t_replace tmp; 1317 struct ip6t_replace tmp;
1317 struct xt_table_info *newinfo; 1318 struct xt_table_info *newinfo;
1318 void *loc_cpu_entry; 1319 void *loc_cpu_entry;
1320 struct ip6t_entry *iter;
1319 1321
1320 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1322 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1321 return -EFAULT; 1323 return -EFAULT;
@@ -1336,9 +1338,7 @@ do_replace(struct net *net, void __user *user, unsigned int len)
1336 goto free_newinfo; 1338 goto free_newinfo;
1337 } 1339 }
1338 1340
1339 ret = translate_table(tmp.name, tmp.valid_hooks, 1341 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1340 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1341 tmp.hook_entry, tmp.underflow);
1342 if (ret != 0) 1342 if (ret != 0)
1343 goto free_newinfo; 1343 goto free_newinfo;
1344 1344
@@ -1351,27 +1351,15 @@ do_replace(struct net *net, void __user *user, unsigned int len)
1351 return 0; 1351 return 0;
1352 1352
1353 free_newinfo_untrans: 1353 free_newinfo_untrans:
1354 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); 1354 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1355 cleanup_entry(iter, net);
1355 free_newinfo: 1356 free_newinfo:
1356 xt_free_table_info(newinfo); 1357 xt_free_table_info(newinfo);
1357 return ret; 1358 return ret;
1358} 1359}
1359 1360
1360/* We're lazy, and add to the first CPU; overflow works its fey magic
1361 * and everything is OK. */
1362static int 1361static int
1363add_counter_to_entry(struct ip6t_entry *e, 1362do_add_counters(struct net *net, const void __user *user, unsigned int len,
1364 const struct xt_counters addme[],
1365 unsigned int *i)
1366{
1367 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1368
1369 (*i)++;
1370 return 0;
1371}
1372
1373static int
1374do_add_counters(struct net *net, void __user *user, unsigned int len,
1375 int compat) 1363 int compat)
1376{ 1364{
1377 unsigned int i, curcpu; 1365 unsigned int i, curcpu;
@@ -1385,6 +1373,7 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,
1385 const struct xt_table_info *private; 1373 const struct xt_table_info *private;
1386 int ret = 0; 1374 int ret = 0;
1387 const void *loc_cpu_entry; 1375 const void *loc_cpu_entry;
1376 struct ip6t_entry *iter;
1388#ifdef CONFIG_COMPAT 1377#ifdef CONFIG_COMPAT
1389 struct compat_xt_counters_info compat_tmp; 1378 struct compat_xt_counters_info compat_tmp;
1390 1379
@@ -1443,11 +1432,10 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,
1443 curcpu = smp_processor_id(); 1432 curcpu = smp_processor_id();
1444 xt_info_wrlock(curcpu); 1433 xt_info_wrlock(curcpu);
1445 loc_cpu_entry = private->entries[curcpu]; 1434 loc_cpu_entry = private->entries[curcpu];
1446 IP6T_ENTRY_ITERATE(loc_cpu_entry, 1435 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1447 private->size, 1436 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1448 add_counter_to_entry, 1437 ++i;
1449 paddc, 1438 }
1450 &i);
1451 xt_info_wrunlock(curcpu); 1439 xt_info_wrunlock(curcpu);
1452 1440
1453 unlock_up_free: 1441 unlock_up_free:
@@ -1476,45 +1464,40 @@ struct compat_ip6t_replace {
1476static int 1464static int
1477compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr, 1465compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1478 unsigned int *size, struct xt_counters *counters, 1466 unsigned int *size, struct xt_counters *counters,
1479 unsigned int *i) 1467 unsigned int i)
1480{ 1468{
1481 struct ip6t_entry_target *t; 1469 struct ip6t_entry_target *t;
1482 struct compat_ip6t_entry __user *ce; 1470 struct compat_ip6t_entry __user *ce;
1483 u_int16_t target_offset, next_offset; 1471 u_int16_t target_offset, next_offset;
1484 compat_uint_t origsize; 1472 compat_uint_t origsize;
1485 int ret; 1473 const struct xt_entry_match *ematch;
1474 int ret = 0;
1486 1475
1487 ret = -EFAULT;
1488 origsize = *size; 1476 origsize = *size;
1489 ce = (struct compat_ip6t_entry __user *)*dstptr; 1477 ce = (struct compat_ip6t_entry __user *)*dstptr;
1490 if (copy_to_user(ce, e, sizeof(struct ip6t_entry))) 1478 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1491 goto out; 1479 copy_to_user(&ce->counters, &counters[i],
1492 1480 sizeof(counters[i])) != 0)
1493 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i]))) 1481 return -EFAULT;
1494 goto out;
1495 1482
1496 *dstptr += sizeof(struct compat_ip6t_entry); 1483 *dstptr += sizeof(struct compat_ip6t_entry);
1497 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); 1484 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1498 1485
1499 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size); 1486 xt_ematch_foreach(ematch, e) {
1487 ret = xt_compat_match_to_user(ematch, dstptr, size);
1488 if (ret != 0)
1489 return ret;
1490 }
1500 target_offset = e->target_offset - (origsize - *size); 1491 target_offset = e->target_offset - (origsize - *size);
1501 if (ret)
1502 goto out;
1503 t = ip6t_get_target(e); 1492 t = ip6t_get_target(e);
1504 ret = xt_compat_target_to_user(t, dstptr, size); 1493 ret = xt_compat_target_to_user(t, dstptr, size);
1505 if (ret) 1494 if (ret)
1506 goto out; 1495 return ret;
1507 ret = -EFAULT;
1508 next_offset = e->next_offset - (origsize - *size); 1496 next_offset = e->next_offset - (origsize - *size);
1509 if (put_user(target_offset, &ce->target_offset)) 1497 if (put_user(target_offset, &ce->target_offset) != 0 ||
1510 goto out; 1498 put_user(next_offset, &ce->next_offset) != 0)
1511 if (put_user(next_offset, &ce->next_offset)) 1499 return -EFAULT;
1512 goto out;
1513
1514 (*i)++;
1515 return 0; 1500 return 0;
1516out:
1517 return ret;
1518} 1501}
1519 1502
1520static int 1503static int
@@ -1522,7 +1505,7 @@ compat_find_calc_match(struct ip6t_entry_match *m,
1522 const char *name, 1505 const char *name,
1523 const struct ip6t_ip6 *ipv6, 1506 const struct ip6t_ip6 *ipv6,
1524 unsigned int hookmask, 1507 unsigned int hookmask,
1525 int *size, unsigned int *i) 1508 int *size)
1526{ 1509{
1527 struct xt_match *match; 1510 struct xt_match *match;
1528 1511
@@ -1536,47 +1519,32 @@ compat_find_calc_match(struct ip6t_entry_match *m,
1536 } 1519 }
1537 m->u.kernel.match = match; 1520 m->u.kernel.match = match;
1538 *size += xt_compat_match_offset(match); 1521 *size += xt_compat_match_offset(match);
1539
1540 (*i)++;
1541 return 0;
1542}
1543
1544static int
1545compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1546{
1547 if (i && (*i)-- == 0)
1548 return 1;
1549
1550 module_put(m->u.kernel.match->me);
1551 return 0; 1522 return 0;
1552} 1523}
1553 1524
1554static int 1525static void compat_release_entry(struct compat_ip6t_entry *e)
1555compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1556{ 1526{
1557 struct ip6t_entry_target *t; 1527 struct ip6t_entry_target *t;
1558 1528 struct xt_entry_match *ematch;
1559 if (i && (*i)-- == 0)
1560 return 1;
1561 1529
1562 /* Cleanup all matches */ 1530 /* Cleanup all matches */
1563 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL); 1531 xt_ematch_foreach(ematch, e)
1532 module_put(ematch->u.kernel.match->me);
1564 t = compat_ip6t_get_target(e); 1533 t = compat_ip6t_get_target(e);
1565 module_put(t->u.kernel.target->me); 1534 module_put(t->u.kernel.target->me);
1566 return 0;
1567} 1535}
1568 1536
1569static int 1537static int
1570check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, 1538check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1571 struct xt_table_info *newinfo, 1539 struct xt_table_info *newinfo,
1572 unsigned int *size, 1540 unsigned int *size,
1573 unsigned char *base, 1541 const unsigned char *base,
1574 unsigned char *limit, 1542 const unsigned char *limit,
1575 unsigned int *hook_entries, 1543 const unsigned int *hook_entries,
1576 unsigned int *underflows, 1544 const unsigned int *underflows,
1577 unsigned int *i,
1578 const char *name) 1545 const char *name)
1579{ 1546{
1547 struct xt_entry_match *ematch;
1580 struct ip6t_entry_target *t; 1548 struct ip6t_entry_target *t;
1581 struct xt_target *target; 1549 struct xt_target *target;
1582 unsigned int entry_offset; 1550 unsigned int entry_offset;
@@ -1605,10 +1573,13 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1605 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); 1573 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1606 entry_offset = (void *)e - (void *)base; 1574 entry_offset = (void *)e - (void *)base;
1607 j = 0; 1575 j = 0;
1608 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name, 1576 xt_ematch_foreach(ematch, e) {
1609 &e->ipv6, e->comefrom, &off, &j); 1577 ret = compat_find_calc_match(ematch, name,
1610 if (ret != 0) 1578 &e->ipv6, e->comefrom, &off);
1611 goto release_matches; 1579 if (ret != 0)
1580 goto release_matches;
1581 ++j;
1582 }
1612 1583
1613 t = compat_ip6t_get_target(e); 1584 t = compat_ip6t_get_target(e);
1614 target = try_then_request_module(xt_find_target(AF_INET6, 1585 target = try_then_request_module(xt_find_target(AF_INET6,
@@ -1640,14 +1611,16 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1640 /* Clear counters and comefrom */ 1611 /* Clear counters and comefrom */
1641 memset(&e->counters, 0, sizeof(e->counters)); 1612 memset(&e->counters, 0, sizeof(e->counters));
1642 e->comefrom = 0; 1613 e->comefrom = 0;
1643
1644 (*i)++;
1645 return 0; 1614 return 0;
1646 1615
1647out: 1616out:
1648 module_put(t->u.kernel.target->me); 1617 module_put(t->u.kernel.target->me);
1649release_matches: 1618release_matches:
1650 IP6T_MATCH_ITERATE(e, compat_release_match, &j); 1619 xt_ematch_foreach(ematch, e) {
1620 if (j-- == 0)
1621 break;
1622 module_put(ematch->u.kernel.match->me);
1623 }
1651 return ret; 1624 return ret;
1652} 1625}
1653 1626
@@ -1661,6 +1634,7 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1661 struct ip6t_entry *de; 1634 struct ip6t_entry *de;
1662 unsigned int origsize; 1635 unsigned int origsize;
1663 int ret, h; 1636 int ret, h;
1637 struct xt_entry_match *ematch;
1664 1638
1665 ret = 0; 1639 ret = 0;
1666 origsize = *size; 1640 origsize = *size;
@@ -1671,10 +1645,11 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1671 *dstptr += sizeof(struct ip6t_entry); 1645 *dstptr += sizeof(struct ip6t_entry);
1672 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); 1646 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1673 1647
1674 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user, 1648 xt_ematch_foreach(ematch, e) {
1675 dstptr, size); 1649 ret = xt_compat_match_from_user(ematch, dstptr, size);
1676 if (ret) 1650 if (ret != 0)
1677 return ret; 1651 return ret;
1652 }
1678 de->target_offset = e->target_offset - (origsize - *size); 1653 de->target_offset = e->target_offset - (origsize - *size);
1679 t = compat_ip6t_get_target(e); 1654 t = compat_ip6t_get_target(e);
1680 target = t->u.kernel.target; 1655 target = t->u.kernel.target;
@@ -1690,36 +1665,44 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1690 return ret; 1665 return ret;
1691} 1666}
1692 1667
1693static int compat_check_entry(struct ip6t_entry *e, const char *name, 1668static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1694 unsigned int *i) 1669 const char *name)
1695{ 1670{
1696 unsigned int j; 1671 unsigned int j;
1697 int ret; 1672 int ret = 0;
1698 struct xt_mtchk_param mtpar; 1673 struct xt_mtchk_param mtpar;
1674 struct xt_entry_match *ematch;
1699 1675
1700 j = 0; 1676 j = 0;
1677 mtpar.net = net;
1701 mtpar.table = name; 1678 mtpar.table = name;
1702 mtpar.entryinfo = &e->ipv6; 1679 mtpar.entryinfo = &e->ipv6;
1703 mtpar.hook_mask = e->comefrom; 1680 mtpar.hook_mask = e->comefrom;
1704 mtpar.family = NFPROTO_IPV6; 1681 mtpar.family = NFPROTO_IPV6;
1705 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j); 1682 xt_ematch_foreach(ematch, e) {
1706 if (ret) 1683 ret = check_match(ematch, &mtpar);
1707 goto cleanup_matches; 1684 if (ret != 0)
1685 goto cleanup_matches;
1686 ++j;
1687 }
1708 1688
1709 ret = check_target(e, name); 1689 ret = check_target(e, net, name);
1710 if (ret) 1690 if (ret)
1711 goto cleanup_matches; 1691 goto cleanup_matches;
1712
1713 (*i)++;
1714 return 0; 1692 return 0;
1715 1693
1716 cleanup_matches: 1694 cleanup_matches:
1717 IP6T_MATCH_ITERATE(e, cleanup_match, &j); 1695 xt_ematch_foreach(ematch, e) {
1696 if (j-- == 0)
1697 break;
1698 cleanup_match(ematch, net);
1699 }
1718 return ret; 1700 return ret;
1719} 1701}
1720 1702
1721static int 1703static int
1722translate_compat_table(const char *name, 1704translate_compat_table(struct net *net,
1705 const char *name,
1723 unsigned int valid_hooks, 1706 unsigned int valid_hooks,
1724 struct xt_table_info **pinfo, 1707 struct xt_table_info **pinfo,
1725 void **pentry0, 1708 void **pentry0,
@@ -1731,8 +1714,10 @@ translate_compat_table(const char *name,
1731 unsigned int i, j; 1714 unsigned int i, j;
1732 struct xt_table_info *newinfo, *info; 1715 struct xt_table_info *newinfo, *info;
1733 void *pos, *entry0, *entry1; 1716 void *pos, *entry0, *entry1;
1717 struct compat_ip6t_entry *iter0;
1718 struct ip6t_entry *iter1;
1734 unsigned int size; 1719 unsigned int size;
1735 int ret; 1720 int ret = 0;
1736 1721
1737 info = *pinfo; 1722 info = *pinfo;
1738 entry0 = *pentry0; 1723 entry0 = *pentry0;
@@ -1749,13 +1734,17 @@ translate_compat_table(const char *name,
1749 j = 0; 1734 j = 0;
1750 xt_compat_lock(AF_INET6); 1735 xt_compat_lock(AF_INET6);
1751 /* Walk through entries, checking offsets. */ 1736 /* Walk through entries, checking offsets. */
1752 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, 1737 xt_entry_foreach(iter0, entry0, total_size) {
1753 check_compat_entry_size_and_hooks, 1738 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1754 info, &size, entry0, 1739 entry0,
1755 entry0 + total_size, 1740 entry0 + total_size,
1756 hook_entries, underflows, &j, name); 1741 hook_entries,
1757 if (ret != 0) 1742 underflows,
1758 goto out_unlock; 1743 name);
1744 if (ret != 0)
1745 goto out_unlock;
1746 ++j;
1747 }
1759 1748
1760 ret = -EINVAL; 1749 ret = -EINVAL;
1761 if (j != number) { 1750 if (j != number) {
@@ -1794,9 +1783,12 @@ translate_compat_table(const char *name,
1794 entry1 = newinfo->entries[raw_smp_processor_id()]; 1783 entry1 = newinfo->entries[raw_smp_processor_id()];
1795 pos = entry1; 1784 pos = entry1;
1796 size = total_size; 1785 size = total_size;
1797 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, 1786 xt_entry_foreach(iter0, entry0, total_size) {
1798 compat_copy_entry_from_user, 1787 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1799 &pos, &size, name, newinfo, entry1); 1788 name, newinfo, entry1);
1789 if (ret != 0)
1790 break;
1791 }
1800 xt_compat_flush_offsets(AF_INET6); 1792 xt_compat_flush_offsets(AF_INET6);
1801 xt_compat_unlock(AF_INET6); 1793 xt_compat_unlock(AF_INET6);
1802 if (ret) 1794 if (ret)
@@ -1807,13 +1799,32 @@ translate_compat_table(const char *name,
1807 goto free_newinfo; 1799 goto free_newinfo;
1808 1800
1809 i = 0; 1801 i = 0;
1810 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry, 1802 xt_entry_foreach(iter1, entry1, newinfo->size) {
1811 name, &i); 1803 ret = compat_check_entry(iter1, net, name);
1804 if (ret != 0)
1805 break;
1806 ++i;
1807 }
1812 if (ret) { 1808 if (ret) {
1809 /*
1810 * The first i matches need cleanup_entry (calls ->destroy)
1811 * because they had called ->check already. The other j-i
1812 * entries need only release.
1813 */
1814 int skip = i;
1813 j -= i; 1815 j -= i;
1814 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i, 1816 xt_entry_foreach(iter0, entry0, newinfo->size) {
1815 compat_release_entry, &j); 1817 if (skip-- > 0)
1816 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i); 1818 continue;
1819 if (j-- == 0)
1820 break;
1821 compat_release_entry(iter0);
1822 }
1823 xt_entry_foreach(iter1, entry1, newinfo->size) {
1824 if (i-- == 0)
1825 break;
1826 cleanup_entry(iter1, net);
1827 }
1817 xt_free_table_info(newinfo); 1828 xt_free_table_info(newinfo);
1818 return ret; 1829 return ret;
1819 } 1830 }
@@ -1831,7 +1842,11 @@ translate_compat_table(const char *name,
1831free_newinfo: 1842free_newinfo:
1832 xt_free_table_info(newinfo); 1843 xt_free_table_info(newinfo);
1833out: 1844out:
1834 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j); 1845 xt_entry_foreach(iter0, entry0, total_size) {
1846 if (j-- == 0)
1847 break;
1848 compat_release_entry(iter0);
1849 }
1835 return ret; 1850 return ret;
1836out_unlock: 1851out_unlock:
1837 xt_compat_flush_offsets(AF_INET6); 1852 xt_compat_flush_offsets(AF_INET6);
@@ -1846,6 +1861,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
1846 struct compat_ip6t_replace tmp; 1861 struct compat_ip6t_replace tmp;
1847 struct xt_table_info *newinfo; 1862 struct xt_table_info *newinfo;
1848 void *loc_cpu_entry; 1863 void *loc_cpu_entry;
1864 struct ip6t_entry *iter;
1849 1865
1850 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1866 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1851 return -EFAULT; 1867 return -EFAULT;
@@ -1868,7 +1884,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
1868 goto free_newinfo; 1884 goto free_newinfo;
1869 } 1885 }
1870 1886
1871 ret = translate_compat_table(tmp.name, tmp.valid_hooks, 1887 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1872 &newinfo, &loc_cpu_entry, tmp.size, 1888 &newinfo, &loc_cpu_entry, tmp.size,
1873 tmp.num_entries, tmp.hook_entry, 1889 tmp.num_entries, tmp.hook_entry,
1874 tmp.underflow); 1890 tmp.underflow);
@@ -1884,7 +1900,8 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
1884 return 0; 1900 return 0;
1885 1901
1886 free_newinfo_untrans: 1902 free_newinfo_untrans:
1887 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); 1903 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1904 cleanup_entry(iter, net);
1888 free_newinfo: 1905 free_newinfo:
1889 xt_free_table_info(newinfo); 1906 xt_free_table_info(newinfo);
1890 return ret; 1907 return ret;
@@ -1933,6 +1950,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1933 int ret = 0; 1950 int ret = 0;
1934 const void *loc_cpu_entry; 1951 const void *loc_cpu_entry;
1935 unsigned int i = 0; 1952 unsigned int i = 0;
1953 struct ip6t_entry *iter;
1936 1954
1937 counters = alloc_counters(table); 1955 counters = alloc_counters(table);
1938 if (IS_ERR(counters)) 1956 if (IS_ERR(counters))
@@ -1945,9 +1963,12 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1945 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1963 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1946 pos = userptr; 1964 pos = userptr;
1947 size = total_size; 1965 size = total_size;
1948 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size, 1966 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1949 compat_copy_entry_to_user, 1967 ret = compat_copy_entry_to_user(iter, &pos,
1950 &pos, &size, counters, &i); 1968 &size, counters, i++);
1969 if (ret != 0)
1970 break;
1971 }
1951 1972
1952 vfree(counters); 1973 vfree(counters);
1953 return ret; 1974 return ret;
@@ -2121,11 +2142,7 @@ struct xt_table *ip6t_register_table(struct net *net,
2121 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 2142 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2122 memcpy(loc_cpu_entry, repl->entries, repl->size); 2143 memcpy(loc_cpu_entry, repl->entries, repl->size);
2123 2144
2124 ret = translate_table(table->name, table->valid_hooks, 2145 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2125 newinfo, loc_cpu_entry, repl->size,
2126 repl->num_entries,
2127 repl->hook_entry,
2128 repl->underflow);
2129 if (ret != 0) 2146 if (ret != 0)
2130 goto out_free; 2147 goto out_free;
2131 2148
@@ -2142,17 +2159,19 @@ out:
2142 return ERR_PTR(ret); 2159 return ERR_PTR(ret);
2143} 2160}
2144 2161
2145void ip6t_unregister_table(struct xt_table *table) 2162void ip6t_unregister_table(struct net *net, struct xt_table *table)
2146{ 2163{
2147 struct xt_table_info *private; 2164 struct xt_table_info *private;
2148 void *loc_cpu_entry; 2165 void *loc_cpu_entry;
2149 struct module *table_owner = table->me; 2166 struct module *table_owner = table->me;
2167 struct ip6t_entry *iter;
2150 2168
2151 private = xt_unregister_table(table); 2169 private = xt_unregister_table(table);
2152 2170
2153 /* Decrease module usage counts and free resources */ 2171 /* Decrease module usage counts and free resources */
2154 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 2172 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2155 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL); 2173 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2174 cleanup_entry(iter, net);
2156 if (private->number > private->initial_entries) 2175 if (private->number > private->initial_entries)
2157 module_put(table_owner); 2176 module_put(table_owner);
2158 xt_free_table_info(private); 2177 xt_free_table_info(private);
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 8311ca31816a..dd8afbaf00a8 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -169,7 +169,7 @@ send_unreach(struct net *net, struct sk_buff *skb_in, unsigned char code,
169 if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL) 169 if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
170 skb_in->dev = net->loopback_dev; 170 skb_in->dev = net->loopback_dev;
171 171
172 icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0, NULL); 172 icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
173} 173}
174 174
175static unsigned int 175static unsigned int
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index ad378efd0eb8..36b72cafc227 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -21,99 +21,26 @@ MODULE_DESCRIPTION("ip6tables filter table");
21 (1 << NF_INET_FORWARD) | \ 21 (1 << NF_INET_FORWARD) | \
22 (1 << NF_INET_LOCAL_OUT)) 22 (1 << NF_INET_LOCAL_OUT))
23 23
24static struct
25{
26 struct ip6t_replace repl;
27 struct ip6t_standard entries[3];
28 struct ip6t_error term;
29} initial_table __net_initdata = {
30 .repl = {
31 .name = "filter",
32 .valid_hooks = FILTER_VALID_HOOKS,
33 .num_entries = 4,
34 .size = sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error),
35 .hook_entry = {
36 [NF_INET_LOCAL_IN] = 0,
37 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
38 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2
39 },
40 .underflow = {
41 [NF_INET_LOCAL_IN] = 0,
42 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
43 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2
44 },
45 },
46 .entries = {
47 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
48 IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
49 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
50 },
51 .term = IP6T_ERROR_INIT, /* ERROR */
52};
53
54static const struct xt_table packet_filter = { 24static const struct xt_table packet_filter = {
55 .name = "filter", 25 .name = "filter",
56 .valid_hooks = FILTER_VALID_HOOKS, 26 .valid_hooks = FILTER_VALID_HOOKS,
57 .me = THIS_MODULE, 27 .me = THIS_MODULE,
58 .af = NFPROTO_IPV6, 28 .af = NFPROTO_IPV6,
29 .priority = NF_IP6_PRI_FILTER,
59}; 30};
60 31
61/* The work comes in here from netfilter.c. */ 32/* The work comes in here from netfilter.c. */
62static unsigned int 33static unsigned int
63ip6t_in_hook(unsigned int hook, 34ip6table_filter_hook(unsigned int hook, struct sk_buff *skb,
64 struct sk_buff *skb, 35 const struct net_device *in, const struct net_device *out,
65 const struct net_device *in, 36 int (*okfn)(struct sk_buff *))
66 const struct net_device *out,
67 int (*okfn)(struct sk_buff *))
68{
69 return ip6t_do_table(skb, hook, in, out,
70 dev_net(in)->ipv6.ip6table_filter);
71}
72
73static unsigned int
74ip6t_local_out_hook(unsigned int hook,
75 struct sk_buff *skb,
76 const struct net_device *in,
77 const struct net_device *out,
78 int (*okfn)(struct sk_buff *))
79{ 37{
80#if 0 38 const struct net *net = dev_net((in != NULL) ? in : out);
81 /* root is playing with raw sockets. */
82 if (skb->len < sizeof(struct iphdr) ||
83 ip_hdrlen(skb) < sizeof(struct iphdr)) {
84 if (net_ratelimit())
85 printk("ip6t_hook: happy cracking.\n");
86 return NF_ACCEPT;
87 }
88#endif
89 39
90 return ip6t_do_table(skb, hook, in, out, 40 return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_filter);
91 dev_net(out)->ipv6.ip6table_filter);
92} 41}
93 42
94static struct nf_hook_ops ip6t_ops[] __read_mostly = { 43static struct nf_hook_ops *filter_ops __read_mostly;
95 {
96 .hook = ip6t_in_hook,
97 .owner = THIS_MODULE,
98 .pf = NFPROTO_IPV6,
99 .hooknum = NF_INET_LOCAL_IN,
100 .priority = NF_IP6_PRI_FILTER,
101 },
102 {
103 .hook = ip6t_in_hook,
104 .owner = THIS_MODULE,
105 .pf = NFPROTO_IPV6,
106 .hooknum = NF_INET_FORWARD,
107 .priority = NF_IP6_PRI_FILTER,
108 },
109 {
110 .hook = ip6t_local_out_hook,
111 .owner = THIS_MODULE,
112 .pf = NFPROTO_IPV6,
113 .hooknum = NF_INET_LOCAL_OUT,
114 .priority = NF_IP6_PRI_FILTER,
115 },
116};
117 44
118/* Default to forward because I got too much mail already. */ 45/* Default to forward because I got too much mail already. */
119static int forward = NF_ACCEPT; 46static int forward = NF_ACCEPT;
@@ -121,9 +48,18 @@ module_param(forward, bool, 0000);
121 48
122static int __net_init ip6table_filter_net_init(struct net *net) 49static int __net_init ip6table_filter_net_init(struct net *net)
123{ 50{
124 /* Register table */ 51 struct ip6t_replace *repl;
52
53 repl = ip6t_alloc_initial_table(&packet_filter);
54 if (repl == NULL)
55 return -ENOMEM;
56 /* Entry 1 is the FORWARD hook */
57 ((struct ip6t_standard *)repl->entries)[1].target.verdict =
58 -forward - 1;
59
125 net->ipv6.ip6table_filter = 60 net->ipv6.ip6table_filter =
126 ip6t_register_table(net, &packet_filter, &initial_table.repl); 61 ip6t_register_table(net, &packet_filter, repl);
62 kfree(repl);
127 if (IS_ERR(net->ipv6.ip6table_filter)) 63 if (IS_ERR(net->ipv6.ip6table_filter))
128 return PTR_ERR(net->ipv6.ip6table_filter); 64 return PTR_ERR(net->ipv6.ip6table_filter);
129 return 0; 65 return 0;
@@ -131,7 +67,7 @@ static int __net_init ip6table_filter_net_init(struct net *net)
131 67
132static void __net_exit ip6table_filter_net_exit(struct net *net) 68static void __net_exit ip6table_filter_net_exit(struct net *net)
133{ 69{
134 ip6t_unregister_table(net->ipv6.ip6table_filter); 70 ip6t_unregister_table(net, net->ipv6.ip6table_filter);
135} 71}
136 72
137static struct pernet_operations ip6table_filter_net_ops = { 73static struct pernet_operations ip6table_filter_net_ops = {
@@ -148,17 +84,16 @@ static int __init ip6table_filter_init(void)
148 return -EINVAL; 84 return -EINVAL;
149 } 85 }
150 86
151 /* Entry 1 is the FORWARD hook */
152 initial_table.entries[1].target.verdict = -forward - 1;
153
154 ret = register_pernet_subsys(&ip6table_filter_net_ops); 87 ret = register_pernet_subsys(&ip6table_filter_net_ops);
155 if (ret < 0) 88 if (ret < 0)
156 return ret; 89 return ret;
157 90
158 /* Register hooks */ 91 /* Register hooks */
159 ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 92 filter_ops = xt_hook_link(&packet_filter, ip6table_filter_hook);
160 if (ret < 0) 93 if (IS_ERR(filter_ops)) {
94 ret = PTR_ERR(filter_ops);
161 goto cleanup_table; 95 goto cleanup_table;
96 }
162 97
163 return ret; 98 return ret;
164 99
@@ -169,7 +104,7 @@ static int __init ip6table_filter_init(void)
169 104
170static void __exit ip6table_filter_fini(void) 105static void __exit ip6table_filter_fini(void)
171{ 106{
172 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 107 xt_hook_unlink(&packet_filter, filter_ops);
173 unregister_pernet_subsys(&ip6table_filter_net_ops); 108 unregister_pernet_subsys(&ip6table_filter_net_ops);
174} 109}
175 110
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index a929c19d30e3..7844e557c0ec 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -21,80 +21,17 @@ MODULE_DESCRIPTION("ip6tables mangle table");
21 (1 << NF_INET_LOCAL_OUT) | \ 21 (1 << NF_INET_LOCAL_OUT) | \
22 (1 << NF_INET_POST_ROUTING)) 22 (1 << NF_INET_POST_ROUTING))
23 23
24static const struct
25{
26 struct ip6t_replace repl;
27 struct ip6t_standard entries[5];
28 struct ip6t_error term;
29} initial_table __net_initdata = {
30 .repl = {
31 .name = "mangle",
32 .valid_hooks = MANGLE_VALID_HOOKS,
33 .num_entries = 6,
34 .size = sizeof(struct ip6t_standard) * 5 + sizeof(struct ip6t_error),
35 .hook_entry = {
36 [NF_INET_PRE_ROUTING] = 0,
37 [NF_INET_LOCAL_IN] = sizeof(struct ip6t_standard),
38 [NF_INET_FORWARD] = sizeof(struct ip6t_standard) * 2,
39 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3,
40 [NF_INET_POST_ROUTING] = sizeof(struct ip6t_standard) * 4,
41 },
42 .underflow = {
43 [NF_INET_PRE_ROUTING] = 0,
44 [NF_INET_LOCAL_IN] = sizeof(struct ip6t_standard),
45 [NF_INET_FORWARD] = sizeof(struct ip6t_standard) * 2,
46 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3,
47 [NF_INET_POST_ROUTING] = sizeof(struct ip6t_standard) * 4,
48 },
49 },
50 .entries = {
51 IP6T_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
52 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
53 IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
54 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
55 IP6T_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
56 },
57 .term = IP6T_ERROR_INIT, /* ERROR */
58};
59
60static const struct xt_table packet_mangler = { 24static const struct xt_table packet_mangler = {
61 .name = "mangle", 25 .name = "mangle",
62 .valid_hooks = MANGLE_VALID_HOOKS, 26 .valid_hooks = MANGLE_VALID_HOOKS,
63 .me = THIS_MODULE, 27 .me = THIS_MODULE,
64 .af = NFPROTO_IPV6, 28 .af = NFPROTO_IPV6,
29 .priority = NF_IP6_PRI_MANGLE,
65}; 30};
66 31
67/* The work comes in here from netfilter.c. */
68static unsigned int
69ip6t_in_hook(unsigned int hook,
70 struct sk_buff *skb,
71 const struct net_device *in,
72 const struct net_device *out,
73 int (*okfn)(struct sk_buff *))
74{
75 return ip6t_do_table(skb, hook, in, out,
76 dev_net(in)->ipv6.ip6table_mangle);
77}
78
79static unsigned int
80ip6t_post_routing_hook(unsigned int hook,
81 struct sk_buff *skb,
82 const struct net_device *in,
83 const struct net_device *out,
84 int (*okfn)(struct sk_buff *))
85{
86 return ip6t_do_table(skb, hook, in, out,
87 dev_net(out)->ipv6.ip6table_mangle);
88}
89
90static unsigned int 32static unsigned int
91ip6t_local_out_hook(unsigned int hook, 33ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
92 struct sk_buff *skb,
93 const struct net_device *in,
94 const struct net_device *out,
95 int (*okfn)(struct sk_buff *))
96{ 34{
97
98 unsigned int ret; 35 unsigned int ret;
99 struct in6_addr saddr, daddr; 36 struct in6_addr saddr, daddr;
100 u_int8_t hop_limit; 37 u_int8_t hop_limit;
@@ -119,7 +56,7 @@ ip6t_local_out_hook(unsigned int hook,
119 /* flowlabel and prio (includes version, which shouldn't change either */ 56 /* flowlabel and prio (includes version, which shouldn't change either */
120 flowlabel = *((u_int32_t *)ipv6_hdr(skb)); 57 flowlabel = *((u_int32_t *)ipv6_hdr(skb));
121 58
122 ret = ip6t_do_table(skb, hook, in, out, 59 ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
123 dev_net(out)->ipv6.ip6table_mangle); 60 dev_net(out)->ipv6.ip6table_mangle);
124 61
125 if (ret != NF_DROP && ret != NF_STOLEN && 62 if (ret != NF_DROP && ret != NF_STOLEN &&
@@ -132,49 +69,33 @@ ip6t_local_out_hook(unsigned int hook,
132 return ret; 69 return ret;
133} 70}
134 71
135static struct nf_hook_ops ip6t_ops[] __read_mostly = { 72/* The work comes in here from netfilter.c. */
136 { 73static unsigned int
137 .hook = ip6t_in_hook, 74ip6table_mangle_hook(unsigned int hook, struct sk_buff *skb,
138 .owner = THIS_MODULE, 75 const struct net_device *in, const struct net_device *out,
139 .pf = NFPROTO_IPV6, 76 int (*okfn)(struct sk_buff *))
140 .hooknum = NF_INET_PRE_ROUTING, 77{
141 .priority = NF_IP6_PRI_MANGLE, 78 if (hook == NF_INET_LOCAL_OUT)
142 }, 79 return ip6t_mangle_out(skb, out);
143 { 80 if (hook == NF_INET_POST_ROUTING)
144 .hook = ip6t_in_hook, 81 return ip6t_do_table(skb, hook, in, out,
145 .owner = THIS_MODULE, 82 dev_net(out)->ipv6.ip6table_mangle);
146 .pf = NFPROTO_IPV6, 83 /* INPUT/FORWARD */
147 .hooknum = NF_INET_LOCAL_IN, 84 return ip6t_do_table(skb, hook, in, out,
148 .priority = NF_IP6_PRI_MANGLE, 85 dev_net(in)->ipv6.ip6table_mangle);
149 }, 86}
150 {
151 .hook = ip6t_in_hook,
152 .owner = THIS_MODULE,
153 .pf = NFPROTO_IPV6,
154 .hooknum = NF_INET_FORWARD,
155 .priority = NF_IP6_PRI_MANGLE,
156 },
157 {
158 .hook = ip6t_local_out_hook,
159 .owner = THIS_MODULE,
160 .pf = NFPROTO_IPV6,
161 .hooknum = NF_INET_LOCAL_OUT,
162 .priority = NF_IP6_PRI_MANGLE,
163 },
164 {
165 .hook = ip6t_post_routing_hook,
166 .owner = THIS_MODULE,
167 .pf = NFPROTO_IPV6,
168 .hooknum = NF_INET_POST_ROUTING,
169 .priority = NF_IP6_PRI_MANGLE,
170 },
171};
172 87
88static struct nf_hook_ops *mangle_ops __read_mostly;
173static int __net_init ip6table_mangle_net_init(struct net *net) 89static int __net_init ip6table_mangle_net_init(struct net *net)
174{ 90{
175 /* Register table */ 91 struct ip6t_replace *repl;
92
93 repl = ip6t_alloc_initial_table(&packet_mangler);
94 if (repl == NULL)
95 return -ENOMEM;
176 net->ipv6.ip6table_mangle = 96 net->ipv6.ip6table_mangle =
177 ip6t_register_table(net, &packet_mangler, &initial_table.repl); 97 ip6t_register_table(net, &packet_mangler, repl);
98 kfree(repl);
178 if (IS_ERR(net->ipv6.ip6table_mangle)) 99 if (IS_ERR(net->ipv6.ip6table_mangle))
179 return PTR_ERR(net->ipv6.ip6table_mangle); 100 return PTR_ERR(net->ipv6.ip6table_mangle);
180 return 0; 101 return 0;
@@ -182,7 +103,7 @@ static int __net_init ip6table_mangle_net_init(struct net *net)
182 103
183static void __net_exit ip6table_mangle_net_exit(struct net *net) 104static void __net_exit ip6table_mangle_net_exit(struct net *net)
184{ 105{
185 ip6t_unregister_table(net->ipv6.ip6table_mangle); 106 ip6t_unregister_table(net, net->ipv6.ip6table_mangle);
186} 107}
187 108
188static struct pernet_operations ip6table_mangle_net_ops = { 109static struct pernet_operations ip6table_mangle_net_ops = {
@@ -199,9 +120,11 @@ static int __init ip6table_mangle_init(void)
199 return ret; 120 return ret;
200 121
201 /* Register hooks */ 122 /* Register hooks */
202 ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 123 mangle_ops = xt_hook_link(&packet_mangler, ip6table_mangle_hook);
203 if (ret < 0) 124 if (IS_ERR(mangle_ops)) {
125 ret = PTR_ERR(mangle_ops);
204 goto cleanup_table; 126 goto cleanup_table;
127 }
205 128
206 return ret; 129 return ret;
207 130
@@ -212,7 +135,7 @@ static int __init ip6table_mangle_init(void)
212 135
213static void __exit ip6table_mangle_fini(void) 136static void __exit ip6table_mangle_fini(void)
214{ 137{
215 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 138 xt_hook_unlink(&packet_mangler, mangle_ops);
216 unregister_pernet_subsys(&ip6table_mangle_net_ops); 139 unregister_pernet_subsys(&ip6table_mangle_net_ops);
217} 140}
218 141
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index ed1a1180f3b3..b9cf7cd61923 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -8,85 +8,37 @@
8 8
9#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT)) 9#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT))
10 10
11static const struct
12{
13 struct ip6t_replace repl;
14 struct ip6t_standard entries[2];
15 struct ip6t_error term;
16} initial_table __net_initdata = {
17 .repl = {
18 .name = "raw",
19 .valid_hooks = RAW_VALID_HOOKS,
20 .num_entries = 3,
21 .size = sizeof(struct ip6t_standard) * 2 + sizeof(struct ip6t_error),
22 .hook_entry = {
23 [NF_INET_PRE_ROUTING] = 0,
24 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard)
25 },
26 .underflow = {
27 [NF_INET_PRE_ROUTING] = 0,
28 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard)
29 },
30 },
31 .entries = {
32 IP6T_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
33 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
34 },
35 .term = IP6T_ERROR_INIT, /* ERROR */
36};
37
38static const struct xt_table packet_raw = { 11static const struct xt_table packet_raw = {
39 .name = "raw", 12 .name = "raw",
40 .valid_hooks = RAW_VALID_HOOKS, 13 .valid_hooks = RAW_VALID_HOOKS,
41 .me = THIS_MODULE, 14 .me = THIS_MODULE,
42 .af = NFPROTO_IPV6, 15 .af = NFPROTO_IPV6,
16 .priority = NF_IP6_PRI_RAW,
43}; 17};
44 18
45/* The work comes in here from netfilter.c. */ 19/* The work comes in here from netfilter.c. */
46static unsigned int 20static unsigned int
47ip6t_pre_routing_hook(unsigned int hook, 21ip6table_raw_hook(unsigned int hook, struct sk_buff *skb,
48 struct sk_buff *skb, 22 const struct net_device *in, const struct net_device *out,
49 const struct net_device *in, 23 int (*okfn)(struct sk_buff *))
50 const struct net_device *out,
51 int (*okfn)(struct sk_buff *))
52{ 24{
53 return ip6t_do_table(skb, hook, in, out, 25 const struct net *net = dev_net((in != NULL) ? in : out);
54 dev_net(in)->ipv6.ip6table_raw);
55}
56 26
57static unsigned int 27 return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_raw);
58ip6t_local_out_hook(unsigned int hook,
59 struct sk_buff *skb,
60 const struct net_device *in,
61 const struct net_device *out,
62 int (*okfn)(struct sk_buff *))
63{
64 return ip6t_do_table(skb, hook, in, out,
65 dev_net(out)->ipv6.ip6table_raw);
66} 28}
67 29
68static struct nf_hook_ops ip6t_ops[] __read_mostly = { 30static struct nf_hook_ops *rawtable_ops __read_mostly;
69 {
70 .hook = ip6t_pre_routing_hook,
71 .pf = NFPROTO_IPV6,
72 .hooknum = NF_INET_PRE_ROUTING,
73 .priority = NF_IP6_PRI_FIRST,
74 .owner = THIS_MODULE,
75 },
76 {
77 .hook = ip6t_local_out_hook,
78 .pf = NFPROTO_IPV6,
79 .hooknum = NF_INET_LOCAL_OUT,
80 .priority = NF_IP6_PRI_FIRST,
81 .owner = THIS_MODULE,
82 },
83};
84 31
85static int __net_init ip6table_raw_net_init(struct net *net) 32static int __net_init ip6table_raw_net_init(struct net *net)
86{ 33{
87 /* Register table */ 34 struct ip6t_replace *repl;
35
36 repl = ip6t_alloc_initial_table(&packet_raw);
37 if (repl == NULL)
38 return -ENOMEM;
88 net->ipv6.ip6table_raw = 39 net->ipv6.ip6table_raw =
89 ip6t_register_table(net, &packet_raw, &initial_table.repl); 40 ip6t_register_table(net, &packet_raw, repl);
41 kfree(repl);
90 if (IS_ERR(net->ipv6.ip6table_raw)) 42 if (IS_ERR(net->ipv6.ip6table_raw))
91 return PTR_ERR(net->ipv6.ip6table_raw); 43 return PTR_ERR(net->ipv6.ip6table_raw);
92 return 0; 44 return 0;
@@ -94,7 +46,7 @@ static int __net_init ip6table_raw_net_init(struct net *net)
94 46
95static void __net_exit ip6table_raw_net_exit(struct net *net) 47static void __net_exit ip6table_raw_net_exit(struct net *net)
96{ 48{
97 ip6t_unregister_table(net->ipv6.ip6table_raw); 49 ip6t_unregister_table(net, net->ipv6.ip6table_raw);
98} 50}
99 51
100static struct pernet_operations ip6table_raw_net_ops = { 52static struct pernet_operations ip6table_raw_net_ops = {
@@ -111,9 +63,11 @@ static int __init ip6table_raw_init(void)
111 return ret; 63 return ret;
112 64
113 /* Register hooks */ 65 /* Register hooks */
114 ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 66 rawtable_ops = xt_hook_link(&packet_raw, ip6table_raw_hook);
115 if (ret < 0) 67 if (IS_ERR(rawtable_ops)) {
68 ret = PTR_ERR(rawtable_ops);
116 goto cleanup_table; 69 goto cleanup_table;
70 }
117 71
118 return ret; 72 return ret;
119 73
@@ -124,7 +78,7 @@ static int __init ip6table_raw_init(void)
124 78
125static void __exit ip6table_raw_fini(void) 79static void __exit ip6table_raw_fini(void)
126{ 80{
127 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 81 xt_hook_unlink(&packet_raw, rawtable_ops);
128 unregister_pernet_subsys(&ip6table_raw_net_ops); 82 unregister_pernet_subsys(&ip6table_raw_net_ops);
129} 83}
130 84
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c
index 41b444c60934..0824d865aa9b 100644
--- a/net/ipv6/netfilter/ip6table_security.c
+++ b/net/ipv6/netfilter/ip6table_security.c
@@ -26,106 +26,37 @@ MODULE_DESCRIPTION("ip6tables security table, for MAC rules");
26 (1 << NF_INET_FORWARD) | \ 26 (1 << NF_INET_FORWARD) | \
27 (1 << NF_INET_LOCAL_OUT) 27 (1 << NF_INET_LOCAL_OUT)
28 28
29static const struct
30{
31 struct ip6t_replace repl;
32 struct ip6t_standard entries[3];
33 struct ip6t_error term;
34} initial_table __net_initdata = {
35 .repl = {
36 .name = "security",
37 .valid_hooks = SECURITY_VALID_HOOKS,
38 .num_entries = 4,
39 .size = sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error),
40 .hook_entry = {
41 [NF_INET_LOCAL_IN] = 0,
42 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
43 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2,
44 },
45 .underflow = {
46 [NF_INET_LOCAL_IN] = 0,
47 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
48 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2,
49 },
50 },
51 .entries = {
52 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
53 IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
54 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
55 },
56 .term = IP6T_ERROR_INIT, /* ERROR */
57};
58
59static const struct xt_table security_table = { 29static const struct xt_table security_table = {
60 .name = "security", 30 .name = "security",
61 .valid_hooks = SECURITY_VALID_HOOKS, 31 .valid_hooks = SECURITY_VALID_HOOKS,
62 .me = THIS_MODULE, 32 .me = THIS_MODULE,
63 .af = NFPROTO_IPV6, 33 .af = NFPROTO_IPV6,
34 .priority = NF_IP6_PRI_SECURITY,
64}; 35};
65 36
66static unsigned int 37static unsigned int
67ip6t_local_in_hook(unsigned int hook, 38ip6table_security_hook(unsigned int hook, struct sk_buff *skb,
68 struct sk_buff *skb, 39 const struct net_device *in,
69 const struct net_device *in, 40 const struct net_device *out,
70 const struct net_device *out, 41 int (*okfn)(struct sk_buff *))
71 int (*okfn)(struct sk_buff *))
72{
73 return ip6t_do_table(skb, hook, in, out,
74 dev_net(in)->ipv6.ip6table_security);
75}
76
77static unsigned int
78ip6t_forward_hook(unsigned int hook,
79 struct sk_buff *skb,
80 const struct net_device *in,
81 const struct net_device *out,
82 int (*okfn)(struct sk_buff *))
83{ 42{
84 return ip6t_do_table(skb, hook, in, out, 43 const struct net *net = dev_net((in != NULL) ? in : out);
85 dev_net(in)->ipv6.ip6table_security);
86}
87 44
88static unsigned int 45 return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_security);
89ip6t_local_out_hook(unsigned int hook,
90 struct sk_buff *skb,
91 const struct net_device *in,
92 const struct net_device *out,
93 int (*okfn)(struct sk_buff *))
94{
95 /* TBD: handle short packets via raw socket */
96 return ip6t_do_table(skb, hook, in, out,
97 dev_net(out)->ipv6.ip6table_security);
98} 46}
99 47
100static struct nf_hook_ops ip6t_ops[] __read_mostly = { 48static struct nf_hook_ops *sectbl_ops __read_mostly;
101 {
102 .hook = ip6t_local_in_hook,
103 .owner = THIS_MODULE,
104 .pf = NFPROTO_IPV6,
105 .hooknum = NF_INET_LOCAL_IN,
106 .priority = NF_IP6_PRI_SECURITY,
107 },
108 {
109 .hook = ip6t_forward_hook,
110 .owner = THIS_MODULE,
111 .pf = NFPROTO_IPV6,
112 .hooknum = NF_INET_FORWARD,
113 .priority = NF_IP6_PRI_SECURITY,
114 },
115 {
116 .hook = ip6t_local_out_hook,
117 .owner = THIS_MODULE,
118 .pf = NFPROTO_IPV6,
119 .hooknum = NF_INET_LOCAL_OUT,
120 .priority = NF_IP6_PRI_SECURITY,
121 },
122};
123 49
124static int __net_init ip6table_security_net_init(struct net *net) 50static int __net_init ip6table_security_net_init(struct net *net)
125{ 51{
126 net->ipv6.ip6table_security = 52 struct ip6t_replace *repl;
127 ip6t_register_table(net, &security_table, &initial_table.repl);
128 53
54 repl = ip6t_alloc_initial_table(&security_table);
55 if (repl == NULL)
56 return -ENOMEM;
57 net->ipv6.ip6table_security =
58 ip6t_register_table(net, &security_table, repl);
59 kfree(repl);
129 if (IS_ERR(net->ipv6.ip6table_security)) 60 if (IS_ERR(net->ipv6.ip6table_security))
130 return PTR_ERR(net->ipv6.ip6table_security); 61 return PTR_ERR(net->ipv6.ip6table_security);
131 62
@@ -134,7 +65,7 @@ static int __net_init ip6table_security_net_init(struct net *net)
134 65
135static void __net_exit ip6table_security_net_exit(struct net *net) 66static void __net_exit ip6table_security_net_exit(struct net *net)
136{ 67{
137 ip6t_unregister_table(net->ipv6.ip6table_security); 68 ip6t_unregister_table(net, net->ipv6.ip6table_security);
138} 69}
139 70
140static struct pernet_operations ip6table_security_net_ops = { 71static struct pernet_operations ip6table_security_net_ops = {
@@ -150,9 +81,11 @@ static int __init ip6table_security_init(void)
150 if (ret < 0) 81 if (ret < 0)
151 return ret; 82 return ret;
152 83
153 ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 84 sectbl_ops = xt_hook_link(&security_table, ip6table_security_hook);
154 if (ret < 0) 85 if (IS_ERR(sectbl_ops)) {
86 ret = PTR_ERR(sectbl_ops);
155 goto cleanup_table; 87 goto cleanup_table;
88 }
156 89
157 return ret; 90 return ret;
158 91
@@ -163,7 +96,7 @@ cleanup_table:
163 96
164static void __exit ip6table_security_fini(void) 97static void __exit ip6table_security_fini(void)
165{ 98{
166 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 99 xt_hook_unlink(&security_table, sectbl_ops);
167 unregister_pernet_subsys(&ip6table_security_net_ops); 100 unregister_pernet_subsys(&ip6table_security_net_ops);
168} 101}
169 102
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 5f2ec208a8c3..996c3f41fecd 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -20,12 +20,14 @@
20#include <net/ipv6.h> 20#include <net/ipv6.h>
21#include <net/inet_frag.h> 21#include <net/inet_frag.h>
22 22
23#include <linux/netfilter_bridge.h>
23#include <linux/netfilter_ipv6.h> 24#include <linux/netfilter_ipv6.h>
24#include <net/netfilter/nf_conntrack.h> 25#include <net/netfilter/nf_conntrack.h>
25#include <net/netfilter/nf_conntrack_helper.h> 26#include <net/netfilter/nf_conntrack_helper.h>
26#include <net/netfilter/nf_conntrack_l4proto.h> 27#include <net/netfilter/nf_conntrack_l4proto.h>
27#include <net/netfilter/nf_conntrack_l3proto.h> 28#include <net/netfilter/nf_conntrack_l3proto.h>
28#include <net/netfilter/nf_conntrack_core.h> 29#include <net/netfilter/nf_conntrack_core.h>
30#include <net/netfilter/nf_conntrack_zones.h>
29#include <net/netfilter/ipv6/nf_conntrack_ipv6.h> 31#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
30#include <net/netfilter/nf_log.h> 32#include <net/netfilter/nf_log.h>
31 33
@@ -187,6 +189,26 @@ out:
187 return nf_conntrack_confirm(skb); 189 return nf_conntrack_confirm(skb);
188} 190}
189 191
192static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
193 struct sk_buff *skb)
194{
195 u16 zone = NF_CT_DEFAULT_ZONE;
196
197 if (skb->nfct)
198 zone = nf_ct_zone((struct nf_conn *)skb->nfct);
199
200#ifdef CONFIG_BRIDGE_NETFILTER
201 if (skb->nf_bridge &&
202 skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
203 return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
204#endif
205 if (hooknum == NF_INET_PRE_ROUTING)
206 return IP6_DEFRAG_CONNTRACK_IN + zone;
207 else
208 return IP6_DEFRAG_CONNTRACK_OUT + zone;
209
210}
211
190static unsigned int ipv6_defrag(unsigned int hooknum, 212static unsigned int ipv6_defrag(unsigned int hooknum,
191 struct sk_buff *skb, 213 struct sk_buff *skb,
192 const struct net_device *in, 214 const struct net_device *in,
@@ -196,11 +218,10 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
196 struct sk_buff *reasm; 218 struct sk_buff *reasm;
197 219
198 /* Previously seen (loopback)? */ 220 /* Previously seen (loopback)? */
199 if (skb->nfct) 221 if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
200 return NF_ACCEPT; 222 return NF_ACCEPT;
201 223
202 reasm = nf_ct_frag6_gather(skb); 224 reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
203
204 /* queued */ 225 /* queued */
205 if (reasm == NULL) 226 if (reasm == NULL)
206 return NF_STOLEN; 227 return NF_STOLEN;
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index c7b8bd1d7984..9be81776415e 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -23,6 +23,7 @@
23#include <net/netfilter/nf_conntrack_tuple.h> 23#include <net/netfilter/nf_conntrack_tuple.h>
24#include <net/netfilter/nf_conntrack_l4proto.h> 24#include <net/netfilter/nf_conntrack_l4proto.h>
25#include <net/netfilter/nf_conntrack_core.h> 25#include <net/netfilter/nf_conntrack_core.h>
26#include <net/netfilter/nf_conntrack_zones.h>
26#include <net/netfilter/ipv6/nf_conntrack_icmpv6.h> 27#include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
27#include <net/netfilter/nf_log.h> 28#include <net/netfilter/nf_log.h>
28 29
@@ -128,7 +129,7 @@ static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb,
128} 129}
129 130
130static int 131static int
131icmpv6_error_message(struct net *net, 132icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
132 struct sk_buff *skb, 133 struct sk_buff *skb,
133 unsigned int icmp6off, 134 unsigned int icmp6off,
134 enum ip_conntrack_info *ctinfo, 135 enum ip_conntrack_info *ctinfo,
@@ -137,6 +138,7 @@ icmpv6_error_message(struct net *net,
137 struct nf_conntrack_tuple intuple, origtuple; 138 struct nf_conntrack_tuple intuple, origtuple;
138 const struct nf_conntrack_tuple_hash *h; 139 const struct nf_conntrack_tuple_hash *h;
139 const struct nf_conntrack_l4proto *inproto; 140 const struct nf_conntrack_l4proto *inproto;
141 u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
140 142
141 NF_CT_ASSERT(skb->nfct == NULL); 143 NF_CT_ASSERT(skb->nfct == NULL);
142 144
@@ -163,7 +165,7 @@ icmpv6_error_message(struct net *net,
163 165
164 *ctinfo = IP_CT_RELATED; 166 *ctinfo = IP_CT_RELATED;
165 167
166 h = nf_conntrack_find_get(net, &intuple); 168 h = nf_conntrack_find_get(net, zone, &intuple);
167 if (!h) { 169 if (!h) {
168 pr_debug("icmpv6_error: no match\n"); 170 pr_debug("icmpv6_error: no match\n");
169 return -NF_ACCEPT; 171 return -NF_ACCEPT;
@@ -179,7 +181,8 @@ icmpv6_error_message(struct net *net,
179} 181}
180 182
181static int 183static int
182icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff, 184icmpv6_error(struct net *net, struct nf_conn *tmpl,
185 struct sk_buff *skb, unsigned int dataoff,
183 enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) 186 enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum)
184{ 187{
185 const struct icmp6hdr *icmp6h; 188 const struct icmp6hdr *icmp6h;
@@ -215,7 +218,7 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
215 if (icmp6h->icmp6_type >= 128) 218 if (icmp6h->icmp6_type >= 128)
216 return NF_ACCEPT; 219 return NF_ACCEPT;
217 220
218 return icmpv6_error_message(net, skb, dataoff, ctinfo, hooknum); 221 return icmpv6_error_message(net, tmpl, skb, dataoff, ctinfo, hooknum);
219} 222}
220 223
221#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 224#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index e0b9424fa1b2..f1171b744650 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -45,9 +45,6 @@
45#include <linux/kernel.h> 45#include <linux/kernel.h>
46#include <linux/module.h> 46#include <linux/module.h>
47 47
48#define NF_CT_FRAG6_HIGH_THRESH 262144 /* == 256*1024 */
49#define NF_CT_FRAG6_LOW_THRESH 196608 /* == 192*1024 */
50#define NF_CT_FRAG6_TIMEOUT IPV6_FRAG_TIMEOUT
51 48
52struct nf_ct_frag6_skb_cb 49struct nf_ct_frag6_skb_cb
53{ 50{
@@ -63,6 +60,7 @@ struct nf_ct_frag6_queue
63 struct inet_frag_queue q; 60 struct inet_frag_queue q;
64 61
65 __be32 id; /* fragment id */ 62 __be32 id; /* fragment id */
63 u32 user;
66 struct in6_addr saddr; 64 struct in6_addr saddr;
67 struct in6_addr daddr; 65 struct in6_addr daddr;
68 66
@@ -168,13 +166,14 @@ out:
168/* Creation primitives. */ 166/* Creation primitives. */
169 167
170static __inline__ struct nf_ct_frag6_queue * 168static __inline__ struct nf_ct_frag6_queue *
171fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst) 169fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
172{ 170{
173 struct inet_frag_queue *q; 171 struct inet_frag_queue *q;
174 struct ip6_create_arg arg; 172 struct ip6_create_arg arg;
175 unsigned int hash; 173 unsigned int hash;
176 174
177 arg.id = id; 175 arg.id = id;
176 arg.user = user;
178 arg.src = src; 177 arg.src = src;
179 arg.dst = dst; 178 arg.dst = dst;
180 179
@@ -470,7 +469,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
470 469
471 /* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */ 470 /* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */
472 fp = skb_shinfo(head)->frag_list; 471 fp = skb_shinfo(head)->frag_list;
473 if (NFCT_FRAG6_CB(fp)->orig == NULL) 472 if (fp && NFCT_FRAG6_CB(fp)->orig == NULL)
474 /* at above code, head skb is divided into two skbs. */ 473 /* at above code, head skb is divided into two skbs. */
475 fp = fp->next; 474 fp = fp->next;
476 475
@@ -559,7 +558,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
559 return 0; 558 return 0;
560} 559}
561 560
562struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb) 561struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
563{ 562{
564 struct sk_buff *clone; 563 struct sk_buff *clone;
565 struct net_device *dev = skb->dev; 564 struct net_device *dev = skb->dev;
@@ -596,16 +595,10 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
596 hdr = ipv6_hdr(clone); 595 hdr = ipv6_hdr(clone);
597 fhdr = (struct frag_hdr *)skb_transport_header(clone); 596 fhdr = (struct frag_hdr *)skb_transport_header(clone);
598 597
599 if (!(fhdr->frag_off & htons(0xFFF9))) {
600 pr_debug("Invalid fragment offset\n");
601 /* It is not a fragmented frame */
602 goto ret_orig;
603 }
604
605 if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh) 598 if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
606 nf_ct_frag6_evictor(); 599 nf_ct_frag6_evictor();
607 600
608 fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr); 601 fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr);
609 if (fq == NULL) { 602 if (fq == NULL) {
610 pr_debug("Can't find and can't create new queue\n"); 603 pr_debug("Can't find and can't create new queue\n");
611 goto ret_orig; 604 goto ret_orig;
@@ -668,8 +661,8 @@ int nf_ct_frag6_init(void)
668 nf_frags.frag_expire = nf_ct_frag6_expire; 661 nf_frags.frag_expire = nf_ct_frag6_expire;
669 nf_frags.secret_interval = 10 * 60 * HZ; 662 nf_frags.secret_interval = 10 * 60 * HZ;
670 nf_init_frags.timeout = IPV6_FRAG_TIMEOUT; 663 nf_init_frags.timeout = IPV6_FRAG_TIMEOUT;
671 nf_init_frags.high_thresh = 256 * 1024; 664 nf_init_frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
672 nf_init_frags.low_thresh = 192 * 1024; 665 nf_init_frags.low_thresh = IPV6_FRAG_LOW_THRESH;
673 inet_frags_init_net(&nf_init_frags); 666 inet_frags_init_net(&nf_init_frags);
674 inet_frags_init(&nf_frags); 667 inet_frags_init(&nf_frags);
675 668
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index c9605c3ad91f..58344c0fbd13 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -59,7 +59,7 @@ static const struct file_operations sockstat6_seq_fops = {
59 .release = single_release_net, 59 .release = single_release_net,
60}; 60};
61 61
62static struct snmp_mib snmp6_ipstats_list[] = { 62static const struct snmp_mib snmp6_ipstats_list[] = {
63/* ipv6 mib according to RFC 2465 */ 63/* ipv6 mib according to RFC 2465 */
64 SNMP_MIB_ITEM("Ip6InReceives", IPSTATS_MIB_INPKTS), 64 SNMP_MIB_ITEM("Ip6InReceives", IPSTATS_MIB_INPKTS),
65 SNMP_MIB_ITEM("Ip6InHdrErrors", IPSTATS_MIB_INHDRERRORS), 65 SNMP_MIB_ITEM("Ip6InHdrErrors", IPSTATS_MIB_INHDRERRORS),
@@ -92,7 +92,7 @@ static struct snmp_mib snmp6_ipstats_list[] = {
92 SNMP_MIB_SENTINEL 92 SNMP_MIB_SENTINEL
93}; 93};
94 94
95static struct snmp_mib snmp6_icmp6_list[] = { 95static const struct snmp_mib snmp6_icmp6_list[] = {
96/* icmpv6 mib according to RFC 2466 */ 96/* icmpv6 mib according to RFC 2466 */
97 SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS), 97 SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS),
98 SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS), 98 SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS),
@@ -120,7 +120,7 @@ static const char *const icmp6type2name[256] = {
120}; 120};
121 121
122 122
123static struct snmp_mib snmp6_udp6_list[] = { 123static const struct snmp_mib snmp6_udp6_list[] = {
124 SNMP_MIB_ITEM("Udp6InDatagrams", UDP_MIB_INDATAGRAMS), 124 SNMP_MIB_ITEM("Udp6InDatagrams", UDP_MIB_INDATAGRAMS),
125 SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS), 125 SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS),
126 SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS), 126 SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS),
@@ -128,7 +128,7 @@ static struct snmp_mib snmp6_udp6_list[] = {
128 SNMP_MIB_SENTINEL 128 SNMP_MIB_SENTINEL
129}; 129};
130 130
131static struct snmp_mib snmp6_udplite6_list[] = { 131static const struct snmp_mib snmp6_udplite6_list[] = {
132 SNMP_MIB_ITEM("UdpLite6InDatagrams", UDP_MIB_INDATAGRAMS), 132 SNMP_MIB_ITEM("UdpLite6InDatagrams", UDP_MIB_INDATAGRAMS),
133 SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS), 133 SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS),
134 SNMP_MIB_ITEM("UdpLite6InErrors", UDP_MIB_INERRORS), 134 SNMP_MIB_ITEM("UdpLite6InErrors", UDP_MIB_INERRORS),
@@ -136,7 +136,7 @@ static struct snmp_mib snmp6_udplite6_list[] = {
136 SNMP_MIB_SENTINEL 136 SNMP_MIB_SENTINEL
137}; 137};
138 138
139static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib) 139static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **mib)
140{ 140{
141 char name[32]; 141 char name[32];
142 int i; 142 int i;
@@ -170,8 +170,8 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib)
170 return; 170 return;
171} 171}
172 172
173static inline void 173static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **mib,
174snmp6_seq_show_item(struct seq_file *seq, void **mib, struct snmp_mib *itemlist) 174 const struct snmp_mib *itemlist)
175{ 175{
176 int i; 176 int i;
177 for (i=0; itemlist[i].name; i++) 177 for (i=0; itemlist[i].name; i++)
@@ -183,14 +183,15 @@ static int snmp6_seq_show(struct seq_file *seq, void *v)
183{ 183{
184 struct net *net = (struct net *)seq->private; 184 struct net *net = (struct net *)seq->private;
185 185
186 snmp6_seq_show_item(seq, (void **)net->mib.ipv6_statistics, 186 snmp6_seq_show_item(seq, (void __percpu **)net->mib.ipv6_statistics,
187 snmp6_ipstats_list); 187 snmp6_ipstats_list);
188 snmp6_seq_show_item(seq, (void **)net->mib.icmpv6_statistics, 188 snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics,
189 snmp6_icmp6_list); 189 snmp6_icmp6_list);
190 snmp6_seq_show_icmpv6msg(seq, (void **)net->mib.icmpv6msg_statistics); 190 snmp6_seq_show_icmpv6msg(seq,
191 snmp6_seq_show_item(seq, (void **)net->mib.udp_stats_in6, 191 (void __percpu **)net->mib.icmpv6msg_statistics);
192 snmp6_seq_show_item(seq, (void __percpu **)net->mib.udp_stats_in6,
192 snmp6_udp6_list); 193 snmp6_udp6_list);
193 snmp6_seq_show_item(seq, (void **)net->mib.udplite_stats_in6, 194 snmp6_seq_show_item(seq, (void __percpu **)net->mib.udplite_stats_in6,
194 snmp6_udplite6_list); 195 snmp6_udplite6_list);
195 return 0; 196 return 0;
196} 197}
@@ -213,9 +214,11 @@ static int snmp6_dev_seq_show(struct seq_file *seq, void *v)
213 struct inet6_dev *idev = (struct inet6_dev *)seq->private; 214 struct inet6_dev *idev = (struct inet6_dev *)seq->private;
214 215
215 seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex); 216 seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex);
216 snmp6_seq_show_item(seq, (void **)idev->stats.ipv6, snmp6_ipstats_list); 217 snmp6_seq_show_item(seq, (void __percpu **)idev->stats.ipv6,
217 snmp6_seq_show_item(seq, (void **)idev->stats.icmpv6, snmp6_icmp6_list); 218 snmp6_ipstats_list);
218 snmp6_seq_show_icmpv6msg(seq, (void **)idev->stats.icmpv6msg); 219 snmp6_seq_show_item(seq, (void __percpu **)idev->stats.icmpv6,
220 snmp6_icmp6_list);
221 snmp6_seq_show_icmpv6msg(seq, (void __percpu **)idev->stats.icmpv6msg);
219 return 0; 222 return 0;
220} 223}
221 224
@@ -259,7 +262,7 @@ int snmp6_unregister_dev(struct inet6_dev *idev)
259 struct net *net = dev_net(idev->dev); 262 struct net *net = dev_net(idev->dev);
260 if (!net->mib.proc_net_devsnmp6) 263 if (!net->mib.proc_net_devsnmp6)
261 return -ENOENT; 264 return -ENOENT;
262 if (!idev || !idev->stats.proc_dir_entry) 265 if (!idev->stats.proc_dir_entry)
263 return -EINVAL; 266 return -EINVAL;
264 remove_proc_entry(idev->stats.proc_dir_entry->name, 267 remove_proc_entry(idev->stats.proc_dir_entry->name,
265 net->mib.proc_net_devsnmp6); 268 net->mib.proc_net_devsnmp6);
@@ -267,7 +270,7 @@ int snmp6_unregister_dev(struct inet6_dev *idev)
267 return 0; 270 return 0;
268} 271}
269 272
270static int ipv6_proc_init_net(struct net *net) 273static int __net_init ipv6_proc_init_net(struct net *net)
271{ 274{
272 if (!proc_net_fops_create(net, "sockstat6", S_IRUGO, 275 if (!proc_net_fops_create(net, "sockstat6", S_IRUGO,
273 &sockstat6_seq_fops)) 276 &sockstat6_seq_fops))
@@ -288,7 +291,7 @@ proc_dev_snmp6_fail:
288 return -ENOMEM; 291 return -ENOMEM;
289} 292}
290 293
291static void ipv6_proc_exit_net(struct net *net) 294static void __net_exit ipv6_proc_exit_net(struct net *net)
292{ 295{
293 proc_net_remove(net, "sockstat6"); 296 proc_net_remove(net, "sockstat6");
294 proc_net_remove(net, "dev_snmp6"); 297 proc_net_remove(net, "dev_snmp6");
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 926ce8eeffaf..ed31c37c6e39 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1275,7 +1275,7 @@ static const struct file_operations raw6_seq_fops = {
1275 .release = seq_release_net, 1275 .release = seq_release_net,
1276}; 1276};
1277 1277
1278static int raw6_init_net(struct net *net) 1278static int __net_init raw6_init_net(struct net *net)
1279{ 1279{
1280 if (!proc_net_fops_create(net, "raw6", S_IRUGO, &raw6_seq_fops)) 1280 if (!proc_net_fops_create(net, "raw6", S_IRUGO, &raw6_seq_fops))
1281 return -ENOMEM; 1281 return -ENOMEM;
@@ -1283,7 +1283,7 @@ static int raw6_init_net(struct net *net)
1283 return 0; 1283 return 0;
1284} 1284}
1285 1285
1286static void raw6_exit_net(struct net *net) 1286static void __net_exit raw6_exit_net(struct net *net)
1287{ 1287{
1288 proc_net_remove(net, "raw6"); 1288 proc_net_remove(net, "raw6");
1289} 1289}
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 4d98549a6868..a555156e9779 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -72,6 +72,7 @@ struct frag_queue
72 struct inet_frag_queue q; 72 struct inet_frag_queue q;
73 73
74 __be32 id; /* fragment id */ 74 __be32 id; /* fragment id */
75 u32 user;
75 struct in6_addr saddr; 76 struct in6_addr saddr;
76 struct in6_addr daddr; 77 struct in6_addr daddr;
77 78
@@ -141,7 +142,7 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a)
141 struct ip6_create_arg *arg = a; 142 struct ip6_create_arg *arg = a;
142 143
143 fq = container_of(q, struct frag_queue, q); 144 fq = container_of(q, struct frag_queue, q);
144 return (fq->id == arg->id && 145 return (fq->id == arg->id && fq->user == arg->user &&
145 ipv6_addr_equal(&fq->saddr, arg->src) && 146 ipv6_addr_equal(&fq->saddr, arg->src) &&
146 ipv6_addr_equal(&fq->daddr, arg->dst)); 147 ipv6_addr_equal(&fq->daddr, arg->dst));
147} 148}
@@ -163,6 +164,7 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a)
163 struct ip6_create_arg *arg = a; 164 struct ip6_create_arg *arg = a;
164 165
165 fq->id = arg->id; 166 fq->id = arg->id;
167 fq->user = arg->user;
166 ipv6_addr_copy(&fq->saddr, arg->src); 168 ipv6_addr_copy(&fq->saddr, arg->src);
167 ipv6_addr_copy(&fq->daddr, arg->dst); 169 ipv6_addr_copy(&fq->daddr, arg->dst);
168} 170}
@@ -226,7 +228,7 @@ static void ip6_frag_expire(unsigned long data)
226 pointer directly, device might already disappeared. 228 pointer directly, device might already disappeared.
227 */ 229 */
228 fq->q.fragments->dev = dev; 230 fq->q.fragments->dev = dev;
229 icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev); 231 icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
230out_rcu_unlock: 232out_rcu_unlock:
231 rcu_read_unlock(); 233 rcu_read_unlock();
232out: 234out:
@@ -235,14 +237,14 @@ out:
235} 237}
236 238
237static __inline__ struct frag_queue * 239static __inline__ struct frag_queue *
238fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst, 240fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst)
239 struct inet6_dev *idev)
240{ 241{
241 struct inet_frag_queue *q; 242 struct inet_frag_queue *q;
242 struct ip6_create_arg arg; 243 struct ip6_create_arg arg;
243 unsigned int hash; 244 unsigned int hash;
244 245
245 arg.id = id; 246 arg.id = id;
247 arg.user = IP6_DEFRAG_LOCAL_DELIVER;
246 arg.src = src; 248 arg.src = src;
247 arg.dst = dst; 249 arg.dst = dst;
248 250
@@ -251,13 +253,9 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
251 253
252 q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash); 254 q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
253 if (q == NULL) 255 if (q == NULL)
254 goto oom; 256 return NULL;
255 257
256 return container_of(q, struct frag_queue, q); 258 return container_of(q, struct frag_queue, q);
257
258oom:
259 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS);
260 return NULL;
261} 259}
262 260
263static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, 261static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
@@ -603,8 +601,8 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
603 if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh) 601 if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
604 ip6_evictor(net, ip6_dst_idev(skb_dst(skb))); 602 ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));
605 603
606 if ((fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr, 604 fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
607 ip6_dst_idev(skb_dst(skb)))) != NULL) { 605 if (fq != NULL) {
608 int ret; 606 int ret;
609 607
610 spin_lock(&fq->q.lock); 608 spin_lock(&fq->q.lock);
@@ -669,7 +667,7 @@ static struct ctl_table ip6_frags_ctl_table[] = {
669 { } 667 { }
670}; 668};
671 669
672static int ip6_frags_ns_sysctl_register(struct net *net) 670static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
673{ 671{
674 struct ctl_table *table; 672 struct ctl_table *table;
675 struct ctl_table_header *hdr; 673 struct ctl_table_header *hdr;
@@ -699,13 +697,14 @@ err_alloc:
699 return -ENOMEM; 697 return -ENOMEM;
700} 698}
701 699
702static void ip6_frags_ns_sysctl_unregister(struct net *net) 700static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
703{ 701{
704 struct ctl_table *table; 702 struct ctl_table *table;
705 703
706 table = net->ipv6.sysctl.frags_hdr->ctl_table_arg; 704 table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
707 unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr); 705 unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
708 kfree(table); 706 if (!net_eq(net, &init_net))
707 kfree(table);
709} 708}
710 709
711static struct ctl_table_header *ip6_ctl_header; 710static struct ctl_table_header *ip6_ctl_header;
@@ -741,10 +740,10 @@ static inline void ip6_frags_sysctl_unregister(void)
741} 740}
742#endif 741#endif
743 742
744static int ipv6_frags_init_net(struct net *net) 743static int __net_init ipv6_frags_init_net(struct net *net)
745{ 744{
746 net->ipv6.frags.high_thresh = 256 * 1024; 745 net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
747 net->ipv6.frags.low_thresh = 192 * 1024; 746 net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
748 net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT; 747 net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
749 748
750 inet_frags_init_net(&net->ipv6.frags); 749 inet_frags_init_net(&net->ipv6.frags);
@@ -752,7 +751,7 @@ static int ipv6_frags_init_net(struct net *net)
752 return ip6_frags_ns_sysctl_register(net); 751 return ip6_frags_ns_sysctl_register(net);
753} 752}
754 753
755static void ipv6_frags_exit_net(struct net *net) 754static void __net_exit ipv6_frags_exit_net(struct net *net)
756{ 755{
757 ip6_frags_ns_sysctl_unregister(net); 756 ip6_frags_ns_sysctl_unregister(net);
758 inet_frags_exit_net(&net->ipv6.frags, &ip6_frags); 757 inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index db3b27303890..0d7713c5c206 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -819,15 +819,8 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
819 819
820 if (!ipv6_addr_any(&fl->fl6_src)) 820 if (!ipv6_addr_any(&fl->fl6_src))
821 flags |= RT6_LOOKUP_F_HAS_SADDR; 821 flags |= RT6_LOOKUP_F_HAS_SADDR;
822 else if (sk) { 822 else if (sk)
823 unsigned int prefs = inet6_sk(sk)->srcprefs; 823 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
824 if (prefs & IPV6_PREFER_SRC_TMP)
825 flags |= RT6_LOOKUP_F_SRCPREF_TMP;
826 if (prefs & IPV6_PREFER_SRC_PUBLIC)
827 flags |= RT6_LOOKUP_F_SRCPREF_PUBLIC;
828 if (prefs & IPV6_PREFER_SRC_COA)
829 flags |= RT6_LOOKUP_F_SRCPREF_COA;
830 }
831 824
832 return fib6_rule_lookup(net, fl, flags, ip6_pol_route_output); 825 return fib6_rule_lookup(net, fl, flags, ip6_pol_route_output);
833} 826}
@@ -886,7 +879,7 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
886 879
887 rt = (struct rt6_info *) dst; 880 rt = (struct rt6_info *) dst;
888 881
889 if (rt && rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) 882 if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
890 return dst; 883 return dst;
891 884
892 return NULL; 885 return NULL;
@@ -897,19 +890,24 @@ static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
897 struct rt6_info *rt = (struct rt6_info *) dst; 890 struct rt6_info *rt = (struct rt6_info *) dst;
898 891
899 if (rt) { 892 if (rt) {
900 if (rt->rt6i_flags & RTF_CACHE) 893 if (rt->rt6i_flags & RTF_CACHE) {
901 ip6_del_rt(rt); 894 if (rt6_check_expired(rt)) {
902 else 895 ip6_del_rt(rt);
896 dst = NULL;
897 }
898 } else {
903 dst_release(dst); 899 dst_release(dst);
900 dst = NULL;
901 }
904 } 902 }
905 return NULL; 903 return dst;
906} 904}
907 905
908static void ip6_link_failure(struct sk_buff *skb) 906static void ip6_link_failure(struct sk_buff *skb)
909{ 907{
910 struct rt6_info *rt; 908 struct rt6_info *rt;
911 909
912 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev); 910 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
913 911
914 rt = (struct rt6_info *) skb_dst(skb); 912 rt = (struct rt6_info *) skb_dst(skb);
915 if (rt) { 913 if (rt) {
@@ -1873,7 +1871,7 @@ static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
1873 switch (ipstats_mib_noroutes) { 1871 switch (ipstats_mib_noroutes) {
1874 case IPSTATS_MIB_INNOROUTES: 1872 case IPSTATS_MIB_INNOROUTES:
1875 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr); 1873 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
1876 if (type == IPV6_ADDR_ANY || type == IPV6_ADDR_RESERVED) { 1874 if (type == IPV6_ADDR_ANY) {
1877 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst), 1875 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
1878 IPSTATS_MIB_INADDRERRORS); 1876 IPSTATS_MIB_INADDRERRORS);
1879 break; 1877 break;
@@ -1884,7 +1882,7 @@ static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
1884 ipstats_mib_noroutes); 1882 ipstats_mib_noroutes);
1885 break; 1883 break;
1886 } 1884 }
1887 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0, skb->dev); 1885 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
1888 kfree_skb(skb); 1886 kfree_skb(skb);
1889 return 0; 1887 return 0;
1890} 1888}
@@ -2612,7 +2610,7 @@ ctl_table ipv6_route_table_template[] = {
2612 { } 2610 { }
2613}; 2611};
2614 2612
2615struct ctl_table *ipv6_route_sysctl_init(struct net *net) 2613struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2616{ 2614{
2617 struct ctl_table *table; 2615 struct ctl_table *table;
2618 2616
@@ -2630,13 +2628,14 @@ struct ctl_table *ipv6_route_sysctl_init(struct net *net)
2630 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity; 2628 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
2631 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires; 2629 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
2632 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss; 2630 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
2631 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2633 } 2632 }
2634 2633
2635 return table; 2634 return table;
2636} 2635}
2637#endif 2636#endif
2638 2637
2639static int ip6_route_net_init(struct net *net) 2638static int __net_init ip6_route_net_init(struct net *net)
2640{ 2639{
2641 int ret = -ENOMEM; 2640 int ret = -ENOMEM;
2642 2641
@@ -2701,7 +2700,7 @@ out_ip6_dst_ops:
2701 goto out; 2700 goto out;
2702} 2701}
2703 2702
2704static void ip6_route_net_exit(struct net *net) 2703static void __net_exit ip6_route_net_exit(struct net *net)
2705{ 2704{
2706#ifdef CONFIG_PROC_FS 2705#ifdef CONFIG_PROC_FS
2707 proc_net_remove(net, "ipv6_route"); 2706 proc_net_remove(net, "ipv6_route");
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 976e68244b99..b1eea811be48 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -62,7 +62,6 @@
62#define HASH_SIZE 16 62#define HASH_SIZE 16
63#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) 63#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
64 64
65static void ipip6_fb_tunnel_init(struct net_device *dev);
66static void ipip6_tunnel_init(struct net_device *dev); 65static void ipip6_tunnel_init(struct net_device *dev);
67static void ipip6_tunnel_setup(struct net_device *dev); 66static void ipip6_tunnel_setup(struct net_device *dev);
68 67
@@ -364,7 +363,6 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
364 goto out; 363 goto out;
365 } 364 }
366 365
367 INIT_RCU_HEAD(&p->rcu_head);
368 p->next = t->prl; 366 p->next = t->prl;
369 p->addr = a->addr; 367 p->addr = a->addr;
370 p->flags = a->flags; 368 p->flags = a->flags;
@@ -745,7 +743,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
745 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); 743 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
746 744
747 if (skb->len > mtu) { 745 if (skb->len > mtu) {
748 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); 746 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
749 ip_rt_put(rt); 747 ip_rt_put(rt);
750 goto tx_error; 748 goto tx_error;
751 } 749 }
@@ -1120,7 +1118,7 @@ static void ipip6_tunnel_init(struct net_device *dev)
1120 ipip6_tunnel_bind_dev(dev); 1118 ipip6_tunnel_bind_dev(dev);
1121} 1119}
1122 1120
1123static void ipip6_fb_tunnel_init(struct net_device *dev) 1121static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
1124{ 1122{
1125 struct ip_tunnel *tunnel = netdev_priv(dev); 1123 struct ip_tunnel *tunnel = netdev_priv(dev);
1126 struct iphdr *iph = &tunnel->parms.iph; 1124 struct iphdr *iph = &tunnel->parms.iph;
@@ -1145,7 +1143,7 @@ static struct xfrm_tunnel sit_handler = {
1145 .priority = 1, 1143 .priority = 1,
1146}; 1144};
1147 1145
1148static void sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head) 1146static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
1149{ 1147{
1150 int prio; 1148 int prio;
1151 1149
@@ -1162,7 +1160,7 @@ static void sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
1162 } 1160 }
1163} 1161}
1164 1162
1165static int sit_init_net(struct net *net) 1163static int __net_init sit_init_net(struct net *net)
1166{ 1164{
1167 struct sit_net *sitn = net_generic(net, sit_net_id); 1165 struct sit_net *sitn = net_generic(net, sit_net_id);
1168 int err; 1166 int err;
@@ -1195,7 +1193,7 @@ err_alloc_dev:
1195 return err; 1193 return err;
1196} 1194}
1197 1195
1198static void sit_exit_net(struct net *net) 1196static void __net_exit sit_exit_net(struct net *net)
1199{ 1197{
1200 struct sit_net *sitn = net_generic(net, sit_net_id); 1198 struct sit_net *sitn = net_generic(net, sit_net_id);
1201 LIST_HEAD(list); 1199 LIST_HEAD(list);
@@ -1228,15 +1226,14 @@ static int __init sit_init(void)
1228 1226
1229 printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n"); 1227 printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n");
1230 1228
1231 if (xfrm4_tunnel_register(&sit_handler, AF_INET6) < 0) {
1232 printk(KERN_INFO "sit init: Can't add protocol\n");
1233 return -EAGAIN;
1234 }
1235
1236 err = register_pernet_device(&sit_net_ops); 1229 err = register_pernet_device(&sit_net_ops);
1237 if (err < 0) 1230 if (err < 0)
1238 xfrm4_tunnel_deregister(&sit_handler, AF_INET6); 1231 return err;
1239 1232 err = xfrm4_tunnel_register(&sit_handler, AF_INET6);
1233 if (err < 0) {
1234 unregister_pernet_device(&sit_net_ops);
1235 printk(KERN_INFO "sit init: Can't add protocol\n");
1236 }
1240 return err; 1237 return err;
1241} 1238}
1242 1239
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 5b9af508b8f2..34d1f0690d7e 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -185,6 +185,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
185 185
186 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); 186 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
187 187
188 /* check for timestamp cookie support */
189 memset(&tcp_opt, 0, sizeof(tcp_opt));
190 tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
191
192 if (tcp_opt.saw_tstamp)
193 cookie_check_timestamp(&tcp_opt);
194
188 ret = NULL; 195 ret = NULL;
189 req = inet6_reqsk_alloc(&tcp6_request_sock_ops); 196 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
190 if (!req) 197 if (!req)
@@ -218,6 +225,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
218 req->expires = 0UL; 225 req->expires = 0UL;
219 req->retrans = 0; 226 req->retrans = 0;
220 ireq->ecn_ok = 0; 227 ireq->ecn_ok = 0;
228 ireq->snd_wscale = tcp_opt.snd_wscale;
229 ireq->rcv_wscale = tcp_opt.rcv_wscale;
230 ireq->sack_ok = tcp_opt.sack_ok;
231 ireq->wscale_ok = tcp_opt.wscale_ok;
232 ireq->tstamp_ok = tcp_opt.saw_tstamp;
233 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
221 treq->rcv_isn = ntohl(th->seq) - 1; 234 treq->rcv_isn = ntohl(th->seq) - 1;
222 treq->snt_isn = cookie; 235 treq->snt_isn = cookie;
223 236
@@ -253,25 +266,11 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
253 goto out_free; 266 goto out_free;
254 } 267 }
255 268
256 /* check for timestamp cookie support */
257 memset(&tcp_opt, 0, sizeof(tcp_opt));
258 tcp_parse_options(skb, &tcp_opt, &hash_location, 0, dst);
259
260 if (tcp_opt.saw_tstamp)
261 cookie_check_timestamp(&tcp_opt);
262
263 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
264
265 ireq->snd_wscale = tcp_opt.snd_wscale;
266 ireq->rcv_wscale = tcp_opt.rcv_wscale;
267 ireq->sack_ok = tcp_opt.sack_ok;
268 ireq->wscale_ok = tcp_opt.wscale_ok;
269 ireq->tstamp_ok = tcp_opt.saw_tstamp;
270
271 req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); 269 req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
272 tcp_select_initial_window(tcp_full_space(sk), req->mss, 270 tcp_select_initial_window(tcp_full_space(sk), req->mss,
273 &req->rcv_wnd, &req->window_clamp, 271 &req->rcv_wnd, &req->window_clamp,
274 ireq->wscale_ok, &rcv_wscale); 272 ireq->wscale_ok, &rcv_wscale,
273 dst_metric(dst, RTAX_INITRWND));
275 274
276 ireq->rcv_wscale = rcv_wscale; 275 ireq->rcv_wscale = rcv_wscale;
277 276
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index c690736885b4..f841d93bf987 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -55,7 +55,7 @@ struct ctl_path net_ipv6_ctl_path[] = {
55}; 55};
56EXPORT_SYMBOL_GPL(net_ipv6_ctl_path); 56EXPORT_SYMBOL_GPL(net_ipv6_ctl_path);
57 57
58static int ipv6_sysctl_net_init(struct net *net) 58static int __net_init ipv6_sysctl_net_init(struct net *net)
59{ 59{
60 struct ctl_table *ipv6_table; 60 struct ctl_table *ipv6_table;
61 struct ctl_table *ipv6_route_table; 61 struct ctl_table *ipv6_route_table;
@@ -98,7 +98,7 @@ out_ipv6_table:
98 goto out; 98 goto out;
99} 99}
100 100
101static void ipv6_sysctl_net_exit(struct net *net) 101static void __net_exit ipv6_sysctl_net_exit(struct net *net)
102{ 102{
103 struct ctl_table *ipv6_table; 103 struct ctl_table *ipv6_table;
104 struct ctl_table *ipv6_route_table; 104 struct ctl_table *ipv6_route_table;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index ee9cf62458d4..9b6dbba80d31 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -520,6 +520,13 @@ done:
520 return err; 520 return err;
521} 521}
522 522
523static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
524 struct request_values *rvp)
525{
526 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
527 return tcp_v6_send_synack(sk, req, rvp);
528}
529
523static inline void syn_flood_warning(struct sk_buff *skb) 530static inline void syn_flood_warning(struct sk_buff *skb)
524{ 531{
525#ifdef CONFIG_SYN_COOKIES 532#ifdef CONFIG_SYN_COOKIES
@@ -876,7 +883,7 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
876 883
877 if (genhash || memcmp(hash_location, newhash, 16) != 0) { 884 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
878 if (net_ratelimit()) { 885 if (net_ratelimit()) {
879 printk(KERN_INFO "MD5 Hash %s for (%pI6, %u)->(%pI6, %u)\n", 886 printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
880 genhash ? "failed" : "mismatch", 887 genhash ? "failed" : "mismatch",
881 &ip6h->saddr, ntohs(th->source), 888 &ip6h->saddr, ntohs(th->source),
882 &ip6h->daddr, ntohs(th->dest)); 889 &ip6h->daddr, ntohs(th->dest));
@@ -890,10 +897,11 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
890struct request_sock_ops tcp6_request_sock_ops __read_mostly = { 897struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
891 .family = AF_INET6, 898 .family = AF_INET6,
892 .obj_size = sizeof(struct tcp6_request_sock), 899 .obj_size = sizeof(struct tcp6_request_sock),
893 .rtx_syn_ack = tcp_v6_send_synack, 900 .rtx_syn_ack = tcp_v6_rtx_synack,
894 .send_ack = tcp_v6_reqsk_send_ack, 901 .send_ack = tcp_v6_reqsk_send_ack,
895 .destructor = tcp_v6_reqsk_destructor, 902 .destructor = tcp_v6_reqsk_destructor,
896 .send_reset = tcp_v6_send_reset 903 .send_reset = tcp_v6_send_reset,
904 .syn_ack_timeout = tcp_syn_ack_timeout,
897}; 905};
898 906
899#ifdef CONFIG_TCP_MD5SIG 907#ifdef CONFIG_TCP_MD5SIG
@@ -1169,7 +1177,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1169 struct inet6_request_sock *treq; 1177 struct inet6_request_sock *treq;
1170 struct ipv6_pinfo *np = inet6_sk(sk); 1178 struct ipv6_pinfo *np = inet6_sk(sk);
1171 struct tcp_sock *tp = tcp_sk(sk); 1179 struct tcp_sock *tp = tcp_sk(sk);
1172 struct dst_entry *dst = __sk_dst_get(sk);
1173 __u32 isn = TCP_SKB_CB(skb)->when; 1180 __u32 isn = TCP_SKB_CB(skb)->when;
1174#ifdef CONFIG_SYN_COOKIES 1181#ifdef CONFIG_SYN_COOKIES
1175 int want_cookie = 0; 1182 int want_cookie = 0;
@@ -1208,7 +1215,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1208 tcp_clear_options(&tmp_opt); 1215 tcp_clear_options(&tmp_opt);
1209 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); 1216 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1210 tmp_opt.user_mss = tp->rx_opt.user_mss; 1217 tmp_opt.user_mss = tp->rx_opt.user_mss;
1211 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst); 1218 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1212 1219
1213 if (tmp_opt.cookie_plus > 0 && 1220 if (tmp_opt.cookie_plus > 0 &&
1214 tmp_opt.saw_tstamp && 1221 tmp_opt.saw_tstamp &&
@@ -1733,8 +1740,11 @@ process:
1733 if (!tcp_prequeue(sk, skb)) 1740 if (!tcp_prequeue(sk, skb))
1734 ret = tcp_v6_do_rcv(sk, skb); 1741 ret = tcp_v6_do_rcv(sk, skb);
1735 } 1742 }
1736 } else 1743 } else if (unlikely(sk_add_backlog(sk, skb))) {
1737 sk_add_backlog(sk, skb); 1744 bh_unlock_sock(sk);
1745 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1746 goto discard_and_relse;
1747 }
1738 bh_unlock_sock(sk); 1748 bh_unlock_sock(sk);
1739 1749
1740 sock_put(sk); 1750 sock_put(sk);
@@ -2106,7 +2116,7 @@ static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2106 }, 2116 },
2107}; 2117};
2108 2118
2109int tcp6_proc_init(struct net *net) 2119int __net_init tcp6_proc_init(struct net *net)
2110{ 2120{
2111 return tcp_proc_register(net, &tcp6_seq_afinfo); 2121 return tcp_proc_register(net, &tcp6_seq_afinfo);
2112} 2122}
@@ -2175,18 +2185,18 @@ static struct inet_protosw tcpv6_protosw = {
2175 INET_PROTOSW_ICSK, 2185 INET_PROTOSW_ICSK,
2176}; 2186};
2177 2187
2178static int tcpv6_net_init(struct net *net) 2188static int __net_init tcpv6_net_init(struct net *net)
2179{ 2189{
2180 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6, 2190 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2181 SOCK_RAW, IPPROTO_TCP, net); 2191 SOCK_RAW, IPPROTO_TCP, net);
2182} 2192}
2183 2193
2184static void tcpv6_net_exit(struct net *net) 2194static void __net_exit tcpv6_net_exit(struct net *net)
2185{ 2195{
2186 inet_ctl_sock_destroy(net->ipv6.tcp_sk); 2196 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2187} 2197}
2188 2198
2189static void tcpv6_net_exit_batch(struct list_head *net_exit_list) 2199static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2190{ 2200{
2191 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6); 2201 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2192} 2202}
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index 51e2832d13a6..e17bc1dfc1a4 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -98,7 +98,7 @@ static int tunnel6_rcv(struct sk_buff *skb)
98 if (!handler->handler(skb)) 98 if (!handler->handler(skb))
99 return 0; 99 return 0;
100 100
101 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, skb->dev); 101 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
102 102
103drop: 103drop:
104 kfree_skb(skb); 104 kfree_skb(skb);
@@ -116,7 +116,7 @@ static int tunnel46_rcv(struct sk_buff *skb)
116 if (!handler->handler(skb)) 116 if (!handler->handler(skb))
117 return 0; 117 return 0;
118 118
119 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, skb->dev); 119 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
120 120
121drop: 121drop:
122 kfree_skb(skb); 122 kfree_skb(skb);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 69ebdbe78c47..3c0c9c755c92 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -322,7 +322,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
322 struct ipv6_pinfo *np = inet6_sk(sk); 322 struct ipv6_pinfo *np = inet6_sk(sk);
323 struct inet_sock *inet = inet_sk(sk); 323 struct inet_sock *inet = inet_sk(sk);
324 struct sk_buff *skb; 324 struct sk_buff *skb;
325 unsigned int ulen, copied; 325 unsigned int ulen;
326 int peeked; 326 int peeked;
327 int err; 327 int err;
328 int is_udplite = IS_UDPLITE(sk); 328 int is_udplite = IS_UDPLITE(sk);
@@ -341,10 +341,9 @@ try_again:
341 goto out; 341 goto out;
342 342
343 ulen = skb->len - sizeof(struct udphdr); 343 ulen = skb->len - sizeof(struct udphdr);
344 copied = len; 344 if (len > ulen)
345 if (copied > ulen) 345 len = ulen;
346 copied = ulen; 346 else if (len < ulen)
347 else if (copied < ulen)
348 msg->msg_flags |= MSG_TRUNC; 347 msg->msg_flags |= MSG_TRUNC;
349 348
350 is_udp4 = (skb->protocol == htons(ETH_P_IP)); 349 is_udp4 = (skb->protocol == htons(ETH_P_IP));
@@ -355,14 +354,14 @@ try_again:
355 * coverage checksum (UDP-Lite), do it before the copy. 354 * coverage checksum (UDP-Lite), do it before the copy.
356 */ 355 */
357 356
358 if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { 357 if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
359 if (udp_lib_checksum_complete(skb)) 358 if (udp_lib_checksum_complete(skb))
360 goto csum_copy_err; 359 goto csum_copy_err;
361 } 360 }
362 361
363 if (skb_csum_unnecessary(skb)) 362 if (skb_csum_unnecessary(skb))
364 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), 363 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
365 msg->msg_iov, copied ); 364 msg->msg_iov,len);
366 else { 365 else {
367 err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); 366 err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
368 if (err == -EINVAL) 367 if (err == -EINVAL)
@@ -411,7 +410,7 @@ try_again:
411 datagram_recv_ctl(sk, msg, skb); 410 datagram_recv_ctl(sk, msg, skb);
412 } 411 }
413 412
414 err = copied; 413 err = len;
415 if (flags & MSG_TRUNC) 414 if (flags & MSG_TRUNC)
416 err = ulen; 415 err = ulen;
417 416
@@ -584,16 +583,20 @@ static void flush_stack(struct sock **stack, unsigned int count,
584 bh_lock_sock(sk); 583 bh_lock_sock(sk);
585 if (!sock_owned_by_user(sk)) 584 if (!sock_owned_by_user(sk))
586 udpv6_queue_rcv_skb(sk, skb1); 585 udpv6_queue_rcv_skb(sk, skb1);
587 else 586 else if (sk_add_backlog(sk, skb1)) {
588 sk_add_backlog(sk, skb1); 587 kfree_skb(skb1);
588 bh_unlock_sock(sk);
589 goto drop;
590 }
589 bh_unlock_sock(sk); 591 bh_unlock_sock(sk);
590 } else { 592 continue;
591 atomic_inc(&sk->sk_drops);
592 UDP6_INC_STATS_BH(sock_net(sk),
593 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
594 UDP6_INC_STATS_BH(sock_net(sk),
595 UDP_MIB_INERRORS, IS_UDPLITE(sk));
596 } 593 }
594drop:
595 atomic_inc(&sk->sk_drops);
596 UDP6_INC_STATS_BH(sock_net(sk),
597 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
598 UDP6_INC_STATS_BH(sock_net(sk),
599 UDP_MIB_INERRORS, IS_UDPLITE(sk));
597 } 600 }
598} 601}
599/* 602/*
@@ -681,12 +684,11 @@ static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh,
681int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, 684int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
682 int proto) 685 int proto)
683{ 686{
687 struct net *net = dev_net(skb->dev);
684 struct sock *sk; 688 struct sock *sk;
685 struct udphdr *uh; 689 struct udphdr *uh;
686 struct net_device *dev = skb->dev;
687 struct in6_addr *saddr, *daddr; 690 struct in6_addr *saddr, *daddr;
688 u32 ulen = 0; 691 u32 ulen = 0;
689 struct net *net = dev_net(skb->dev);
690 692
691 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 693 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
692 goto short_packet; 694 goto short_packet;
@@ -745,7 +747,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
745 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, 747 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
746 proto == IPPROTO_UDPLITE); 748 proto == IPPROTO_UDPLITE);
747 749
748 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev); 750 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
749 751
750 kfree_skb(skb); 752 kfree_skb(skb);
751 return 0; 753 return 0;
@@ -756,8 +758,12 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
756 bh_lock_sock(sk); 758 bh_lock_sock(sk);
757 if (!sock_owned_by_user(sk)) 759 if (!sock_owned_by_user(sk))
758 udpv6_queue_rcv_skb(sk, skb); 760 udpv6_queue_rcv_skb(sk, skb);
759 else 761 else if (sk_add_backlog(sk, skb)) {
760 sk_add_backlog(sk, skb); 762 atomic_inc(&sk->sk_drops);
763 bh_unlock_sock(sk);
764 sock_put(sk);
765 goto discard;
766 }
761 bh_unlock_sock(sk); 767 bh_unlock_sock(sk);
762 sock_put(sk); 768 sock_put(sk);
763 return 0; 769 return 0;
@@ -1396,7 +1402,7 @@ static struct udp_seq_afinfo udp6_seq_afinfo = {
1396 }, 1402 },
1397}; 1403};
1398 1404
1399int udp6_proc_init(struct net *net) 1405int __net_init udp6_proc_init(struct net *net)
1400{ 1406{
1401 return udp_proc_register(net, &udp6_seq_afinfo); 1407 return udp_proc_register(net, &udp6_seq_afinfo);
1402} 1408}
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 6ea6938919e6..5f48fadc27f7 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -104,12 +104,12 @@ static struct udp_seq_afinfo udplite6_seq_afinfo = {
104 }, 104 },
105}; 105};
106 106
107static int udplite6_proc_init_net(struct net *net) 107static int __net_init udplite6_proc_init_net(struct net *net)
108{ 108{
109 return udp_proc_register(net, &udplite6_seq_afinfo); 109 return udp_proc_register(net, &udplite6_seq_afinfo);
110} 110}
111 111
112static void udplite6_proc_exit_net(struct net *net) 112static void __net_exit udplite6_proc_exit_net(struct net *net)
113{ 113{
114 udp_proc_unregister(net, &udplite6_seq_afinfo); 114 udp_proc_unregister(net, &udplite6_seq_afinfo);
115} 115}
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 9084582d236b..2bc98ede1235 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -101,7 +101,7 @@ int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
101 break; 101 break;
102 } 102 }
103 103
104 x = xfrm_state_lookup_byaddr(net, dst, src, proto, AF_INET6); 104 x = xfrm_state_lookup_byaddr(net, skb->mark, dst, src, proto, AF_INET6);
105 if (!x) 105 if (!x)
106 continue; 106 continue;
107 107
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index c4f4eef032a3..0c92112dcba3 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -38,7 +38,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
38 38
39 if (!skb->local_df && skb->len > mtu) { 39 if (!skb->local_df && skb->len > mtu) {
40 skb->dev = dst->dev; 40 skb->dev = dst->dev;
41 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 41 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
42 ret = -EMSGSIZE; 42 ret = -EMSGSIZE;
43 } 43 }
44 44
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 7254e3f899a7..ae181651c75a 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -24,7 +24,6 @@
24#include <net/mip6.h> 24#include <net/mip6.h>
25#endif 25#endif
26 26
27static struct dst_ops xfrm6_dst_ops;
28static struct xfrm_policy_afinfo xfrm6_policy_afinfo; 27static struct xfrm_policy_afinfo xfrm6_policy_afinfo;
29 28
30static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, 29static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
@@ -117,7 +116,8 @@ static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,
117 return 0; 116 return 0;
118} 117}
119 118
120static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev) 119static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
120 struct flowi *fl)
121{ 121{
122 struct rt6_info *rt = (struct rt6_info*)xdst->route; 122 struct rt6_info *rt = (struct rt6_info*)xdst->route;
123 123
@@ -224,8 +224,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
224 224
225static inline int xfrm6_garbage_collect(struct dst_ops *ops) 225static inline int xfrm6_garbage_collect(struct dst_ops *ops)
226{ 226{
227 xfrm6_policy_afinfo.garbage_collect(&init_net); 227 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
228 return (atomic_read(&xfrm6_dst_ops.entries) > xfrm6_dst_ops.gc_thresh*2); 228
229 xfrm6_policy_afinfo.garbage_collect(net);
230 return (atomic_read(&ops->entries) > ops->gc_thresh * 2);
229} 231}
230 232
231static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu) 233static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -310,7 +312,7 @@ static void xfrm6_policy_fini(void)
310static struct ctl_table xfrm6_policy_table[] = { 312static struct ctl_table xfrm6_policy_table[] = {
311 { 313 {
312 .procname = "xfrm6_gc_thresh", 314 .procname = "xfrm6_gc_thresh",
313 .data = &xfrm6_dst_ops.gc_thresh, 315 .data = &init_net.xfrm.xfrm6_dst_ops.gc_thresh,
314 .maxlen = sizeof(int), 316 .maxlen = sizeof(int),
315 .mode = 0644, 317 .mode = 0644,
316 .proc_handler = proc_dointvec, 318 .proc_handler = proc_dointvec,
@@ -326,13 +328,6 @@ int __init xfrm6_init(void)
326 int ret; 328 int ret;
327 unsigned int gc_thresh; 329 unsigned int gc_thresh;
328 330
329 ret = xfrm6_policy_init();
330 if (ret)
331 goto out;
332
333 ret = xfrm6_state_init();
334 if (ret)
335 goto out_policy;
336 /* 331 /*
337 * We need a good default value for the xfrm6 gc threshold. 332 * We need a good default value for the xfrm6 gc threshold.
338 * In ipv4 we set it to the route hash table size * 8, which 333 * In ipv4 we set it to the route hash table size * 8, which
@@ -346,6 +341,15 @@ int __init xfrm6_init(void)
346 */ 341 */
347 gc_thresh = FIB6_TABLE_HASHSZ * 8; 342 gc_thresh = FIB6_TABLE_HASHSZ * 8;
348 xfrm6_dst_ops.gc_thresh = (gc_thresh < 1024) ? 1024 : gc_thresh; 343 xfrm6_dst_ops.gc_thresh = (gc_thresh < 1024) ? 1024 : gc_thresh;
344
345 ret = xfrm6_policy_init();
346 if (ret)
347 goto out;
348
349 ret = xfrm6_state_init();
350 if (ret)
351 goto out_policy;
352
349#ifdef CONFIG_SYSCTL 353#ifdef CONFIG_SYSCTL
350 sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path, 354 sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path,
351 xfrm6_policy_table); 355 xfrm6_policy_table);
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 438831d33593..fa85a7d22dc4 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -30,6 +30,25 @@
30#include <linux/ipv6.h> 30#include <linux/ipv6.h>
31#include <linux/icmpv6.h> 31#include <linux/icmpv6.h>
32#include <linux/mutex.h> 32#include <linux/mutex.h>
33#include <net/netns/generic.h>
34
35#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
36#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
37
38#define XFRM6_TUNNEL_SPI_MIN 1
39#define XFRM6_TUNNEL_SPI_MAX 0xffffffff
40
41struct xfrm6_tunnel_net {
42 struct hlist_head spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
43 struct hlist_head spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
44 u32 spi;
45};
46
47static int xfrm6_tunnel_net_id __read_mostly;
48static inline struct xfrm6_tunnel_net *xfrm6_tunnel_pernet(struct net *net)
49{
50 return net_generic(net, xfrm6_tunnel_net_id);
51}
33 52
34/* 53/*
35 * xfrm_tunnel_spi things are for allocating unique id ("spi") 54 * xfrm_tunnel_spi things are for allocating unique id ("spi")
@@ -46,19 +65,8 @@ struct xfrm6_tunnel_spi {
46 65
47static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock); 66static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock);
48 67
49static u32 xfrm6_tunnel_spi;
50
51#define XFRM6_TUNNEL_SPI_MIN 1
52#define XFRM6_TUNNEL_SPI_MAX 0xffffffff
53
54static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly; 68static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
55 69
56#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
57#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
58
59static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
60static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
61
62static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr) 70static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
63{ 71{
64 unsigned h; 72 unsigned h;
@@ -76,50 +84,14 @@ static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi)
76 return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE; 84 return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
77} 85}
78 86
79 87static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr)
80static int xfrm6_tunnel_spi_init(void)
81{
82 int i;
83
84 xfrm6_tunnel_spi = 0;
85 xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
86 sizeof(struct xfrm6_tunnel_spi),
87 0, SLAB_HWCACHE_ALIGN,
88 NULL);
89 if (!xfrm6_tunnel_spi_kmem)
90 return -ENOMEM;
91
92 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
93 INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]);
94 for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
95 INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byspi[i]);
96 return 0;
97}
98
99static void xfrm6_tunnel_spi_fini(void)
100{
101 int i;
102
103 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) {
104 if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i]))
105 return;
106 }
107 for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) {
108 if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i]))
109 return;
110 }
111 rcu_barrier();
112 kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
113 xfrm6_tunnel_spi_kmem = NULL;
114}
115
116static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
117{ 88{
89 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
118 struct xfrm6_tunnel_spi *x6spi; 90 struct xfrm6_tunnel_spi *x6spi;
119 struct hlist_node *pos; 91 struct hlist_node *pos;
120 92
121 hlist_for_each_entry_rcu(x6spi, pos, 93 hlist_for_each_entry_rcu(x6spi, pos,
122 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], 94 &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
123 list_byaddr) { 95 list_byaddr) {
124 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) 96 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0)
125 return x6spi; 97 return x6spi;
@@ -128,13 +100,13 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
128 return NULL; 100 return NULL;
129} 101}
130 102
131__be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) 103__be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr)
132{ 104{
133 struct xfrm6_tunnel_spi *x6spi; 105 struct xfrm6_tunnel_spi *x6spi;
134 u32 spi; 106 u32 spi;
135 107
136 rcu_read_lock_bh(); 108 rcu_read_lock_bh();
137 x6spi = __xfrm6_tunnel_spi_lookup(saddr); 109 x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
138 spi = x6spi ? x6spi->spi : 0; 110 spi = x6spi ? x6spi->spi : 0;
139 rcu_read_unlock_bh(); 111 rcu_read_unlock_bh();
140 return htonl(spi); 112 return htonl(spi);
@@ -142,14 +114,15 @@ __be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
142 114
143EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup); 115EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup);
144 116
145static int __xfrm6_tunnel_spi_check(u32 spi) 117static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi)
146{ 118{
119 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
147 struct xfrm6_tunnel_spi *x6spi; 120 struct xfrm6_tunnel_spi *x6spi;
148 int index = xfrm6_tunnel_spi_hash_byspi(spi); 121 int index = xfrm6_tunnel_spi_hash_byspi(spi);
149 struct hlist_node *pos; 122 struct hlist_node *pos;
150 123
151 hlist_for_each_entry(x6spi, pos, 124 hlist_for_each_entry(x6spi, pos,
152 &xfrm6_tunnel_spi_byspi[index], 125 &xfrm6_tn->spi_byspi[index],
153 list_byspi) { 126 list_byspi) {
154 if (x6spi->spi == spi) 127 if (x6spi->spi == spi)
155 return -1; 128 return -1;
@@ -157,61 +130,61 @@ static int __xfrm6_tunnel_spi_check(u32 spi)
157 return index; 130 return index;
158} 131}
159 132
160static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr) 133static u32 __xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
161{ 134{
135 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
162 u32 spi; 136 u32 spi;
163 struct xfrm6_tunnel_spi *x6spi; 137 struct xfrm6_tunnel_spi *x6spi;
164 int index; 138 int index;
165 139
166 if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN || 140 if (xfrm6_tn->spi < XFRM6_TUNNEL_SPI_MIN ||
167 xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX) 141 xfrm6_tn->spi >= XFRM6_TUNNEL_SPI_MAX)
168 xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN; 142 xfrm6_tn->spi = XFRM6_TUNNEL_SPI_MIN;
169 else 143 else
170 xfrm6_tunnel_spi++; 144 xfrm6_tn->spi++;
171 145
172 for (spi = xfrm6_tunnel_spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) { 146 for (spi = xfrm6_tn->spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
173 index = __xfrm6_tunnel_spi_check(spi); 147 index = __xfrm6_tunnel_spi_check(net, spi);
174 if (index >= 0) 148 if (index >= 0)
175 goto alloc_spi; 149 goto alloc_spi;
176 } 150 }
177 for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tunnel_spi; spi++) { 151 for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tn->spi; spi++) {
178 index = __xfrm6_tunnel_spi_check(spi); 152 index = __xfrm6_tunnel_spi_check(net, spi);
179 if (index >= 0) 153 if (index >= 0)
180 goto alloc_spi; 154 goto alloc_spi;
181 } 155 }
182 spi = 0; 156 spi = 0;
183 goto out; 157 goto out;
184alloc_spi: 158alloc_spi:
185 xfrm6_tunnel_spi = spi; 159 xfrm6_tn->spi = spi;
186 x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC); 160 x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
187 if (!x6spi) 161 if (!x6spi)
188 goto out; 162 goto out;
189 163
190 INIT_RCU_HEAD(&x6spi->rcu_head);
191 memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr)); 164 memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
192 x6spi->spi = spi; 165 x6spi->spi = spi;
193 atomic_set(&x6spi->refcnt, 1); 166 atomic_set(&x6spi->refcnt, 1);
194 167
195 hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tunnel_spi_byspi[index]); 168 hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tn->spi_byspi[index]);
196 169
197 index = xfrm6_tunnel_spi_hash_byaddr(saddr); 170 index = xfrm6_tunnel_spi_hash_byaddr(saddr);
198 hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]); 171 hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tn->spi_byaddr[index]);
199out: 172out:
200 return spi; 173 return spi;
201} 174}
202 175
203__be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr) 176__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
204{ 177{
205 struct xfrm6_tunnel_spi *x6spi; 178 struct xfrm6_tunnel_spi *x6spi;
206 u32 spi; 179 u32 spi;
207 180
208 spin_lock_bh(&xfrm6_tunnel_spi_lock); 181 spin_lock_bh(&xfrm6_tunnel_spi_lock);
209 x6spi = __xfrm6_tunnel_spi_lookup(saddr); 182 x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
210 if (x6spi) { 183 if (x6spi) {
211 atomic_inc(&x6spi->refcnt); 184 atomic_inc(&x6spi->refcnt);
212 spi = x6spi->spi; 185 spi = x6spi->spi;
213 } else 186 } else
214 spi = __xfrm6_tunnel_alloc_spi(saddr); 187 spi = __xfrm6_tunnel_alloc_spi(net, saddr);
215 spin_unlock_bh(&xfrm6_tunnel_spi_lock); 188 spin_unlock_bh(&xfrm6_tunnel_spi_lock);
216 189
217 return htonl(spi); 190 return htonl(spi);
@@ -225,15 +198,16 @@ static void x6spi_destroy_rcu(struct rcu_head *head)
225 container_of(head, struct xfrm6_tunnel_spi, rcu_head)); 198 container_of(head, struct xfrm6_tunnel_spi, rcu_head));
226} 199}
227 200
228void xfrm6_tunnel_free_spi(xfrm_address_t *saddr) 201void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr)
229{ 202{
203 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
230 struct xfrm6_tunnel_spi *x6spi; 204 struct xfrm6_tunnel_spi *x6spi;
231 struct hlist_node *pos, *n; 205 struct hlist_node *pos, *n;
232 206
233 spin_lock_bh(&xfrm6_tunnel_spi_lock); 207 spin_lock_bh(&xfrm6_tunnel_spi_lock);
234 208
235 hlist_for_each_entry_safe(x6spi, pos, n, 209 hlist_for_each_entry_safe(x6spi, pos, n,
236 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], 210 &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
237 list_byaddr) 211 list_byaddr)
238 { 212 {
239 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { 213 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
@@ -263,10 +237,11 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
263 237
264static int xfrm6_tunnel_rcv(struct sk_buff *skb) 238static int xfrm6_tunnel_rcv(struct sk_buff *skb)
265{ 239{
240 struct net *net = dev_net(skb->dev);
266 struct ipv6hdr *iph = ipv6_hdr(skb); 241 struct ipv6hdr *iph = ipv6_hdr(skb);
267 __be32 spi; 242 __be32 spi;
268 243
269 spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr); 244 spi = xfrm6_tunnel_spi_lookup(net, (xfrm_address_t *)&iph->saddr);
270 return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0; 245 return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0;
271} 246}
272 247
@@ -326,7 +301,9 @@ static int xfrm6_tunnel_init_state(struct xfrm_state *x)
326 301
327static void xfrm6_tunnel_destroy(struct xfrm_state *x) 302static void xfrm6_tunnel_destroy(struct xfrm_state *x)
328{ 303{
329 xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr); 304 struct net *net = xs_net(x);
305
306 xfrm6_tunnel_free_spi(net, (xfrm_address_t *)&x->props.saddr);
330} 307}
331 308
332static const struct xfrm_type xfrm6_tunnel_type = { 309static const struct xfrm_type xfrm6_tunnel_type = {
@@ -351,34 +328,73 @@ static struct xfrm6_tunnel xfrm46_tunnel_handler = {
351 .priority = 2, 328 .priority = 2,
352}; 329};
353 330
331static int __net_init xfrm6_tunnel_net_init(struct net *net)
332{
333 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
334 unsigned int i;
335
336 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
337 INIT_HLIST_HEAD(&xfrm6_tn->spi_byaddr[i]);
338 for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
339 INIT_HLIST_HEAD(&xfrm6_tn->spi_byspi[i]);
340 xfrm6_tn->spi = 0;
341
342 return 0;
343}
344
345static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
346{
347}
348
349static struct pernet_operations xfrm6_tunnel_net_ops = {
350 .init = xfrm6_tunnel_net_init,
351 .exit = xfrm6_tunnel_net_exit,
352 .id = &xfrm6_tunnel_net_id,
353 .size = sizeof(struct xfrm6_tunnel_net),
354};
355
354static int __init xfrm6_tunnel_init(void) 356static int __init xfrm6_tunnel_init(void)
355{ 357{
356 if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0) 358 int rv;
357 goto err; 359
358 if (xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6)) 360 xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
359 goto unreg; 361 sizeof(struct xfrm6_tunnel_spi),
360 if (xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET)) 362 0, SLAB_HWCACHE_ALIGN,
361 goto dereg6; 363 NULL);
362 if (xfrm6_tunnel_spi_init() < 0) 364 if (!xfrm6_tunnel_spi_kmem)
363 goto dereg46; 365 return -ENOMEM;
366 rv = register_pernet_subsys(&xfrm6_tunnel_net_ops);
367 if (rv < 0)
368 goto out_pernet;
369 rv = xfrm_register_type(&xfrm6_tunnel_type, AF_INET6);
370 if (rv < 0)
371 goto out_type;
372 rv = xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6);
373 if (rv < 0)
374 goto out_xfrm6;
375 rv = xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET);
376 if (rv < 0)
377 goto out_xfrm46;
364 return 0; 378 return 0;
365 379
366dereg46: 380out_xfrm46:
367 xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
368dereg6:
369 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6); 381 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
370unreg: 382out_xfrm6:
371 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); 383 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
372err: 384out_type:
373 return -EAGAIN; 385 unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
386out_pernet:
387 kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
388 return rv;
374} 389}
375 390
376static void __exit xfrm6_tunnel_fini(void) 391static void __exit xfrm6_tunnel_fini(void)
377{ 392{
378 xfrm6_tunnel_spi_fini();
379 xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET); 393 xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
380 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6); 394 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
381 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); 395 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
396 unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
397 kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
382} 398}
383 399
384module_init(xfrm6_tunnel_init); 400module_init(xfrm6_tunnel_init);
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index 576178482f89..26b5bfcf1d03 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -13,45 +13,15 @@
13#include <net/tcp_states.h> 13#include <net/tcp_states.h>
14#include <net/ipx.h> 14#include <net/ipx.h>
15 15
16static __inline__ struct ipx_interface *ipx_get_interface_idx(loff_t pos)
17{
18 struct ipx_interface *i;
19
20 list_for_each_entry(i, &ipx_interfaces, node)
21 if (!pos--)
22 goto out;
23 i = NULL;
24out:
25 return i;
26}
27
28static struct ipx_interface *ipx_interfaces_next(struct ipx_interface *i)
29{
30 struct ipx_interface *rc = NULL;
31
32 if (i->node.next != &ipx_interfaces)
33 rc = list_entry(i->node.next, struct ipx_interface, node);
34 return rc;
35}
36
37static void *ipx_seq_interface_start(struct seq_file *seq, loff_t *pos) 16static void *ipx_seq_interface_start(struct seq_file *seq, loff_t *pos)
38{ 17{
39 loff_t l = *pos;
40
41 spin_lock_bh(&ipx_interfaces_lock); 18 spin_lock_bh(&ipx_interfaces_lock);
42 return l ? ipx_get_interface_idx(--l) : SEQ_START_TOKEN; 19 return seq_list_start_head(&ipx_interfaces, *pos);
43} 20}
44 21
45static void *ipx_seq_interface_next(struct seq_file *seq, void *v, loff_t *pos) 22static void *ipx_seq_interface_next(struct seq_file *seq, void *v, loff_t *pos)
46{ 23{
47 struct ipx_interface *i; 24 return seq_list_next(v, &ipx_interfaces, pos);
48
49 ++*pos;
50 if (v == SEQ_START_TOKEN)
51 i = ipx_interfaces_head();
52 else
53 i = ipx_interfaces_next(v);
54 return i;
55} 25}
56 26
57static void ipx_seq_interface_stop(struct seq_file *seq, void *v) 27static void ipx_seq_interface_stop(struct seq_file *seq, void *v)
@@ -63,7 +33,7 @@ static int ipx_seq_interface_show(struct seq_file *seq, void *v)
63{ 33{
64 struct ipx_interface *i; 34 struct ipx_interface *i;
65 35
66 if (v == SEQ_START_TOKEN) { 36 if (v == &ipx_interfaces) {
67 seq_puts(seq, "Network Node_Address Primary Device " 37 seq_puts(seq, "Network Node_Address Primary Device "
68 "Frame_Type"); 38 "Frame_Type");
69#ifdef IPX_REFCNT_DEBUG 39#ifdef IPX_REFCNT_DEBUG
@@ -73,7 +43,7 @@ static int ipx_seq_interface_show(struct seq_file *seq, void *v)
73 goto out; 43 goto out;
74 } 44 }
75 45
76 i = v; 46 i = list_entry(v, struct ipx_interface, node);
77 seq_printf(seq, "%08lX ", (unsigned long int)ntohl(i->if_netnum)); 47 seq_printf(seq, "%08lX ", (unsigned long int)ntohl(i->if_netnum));
78 seq_printf(seq, "%02X%02X%02X%02X%02X%02X ", 48 seq_printf(seq, "%02X%02X%02X%02X%02X%02X ",
79 i->if_node[0], i->if_node[1], i->if_node[2], 49 i->if_node[0], i->if_node[1], i->if_node[2],
@@ -89,53 +59,15 @@ out:
89 return 0; 59 return 0;
90} 60}
91 61
92static struct ipx_route *ipx_routes_head(void)
93{
94 struct ipx_route *rc = NULL;
95
96 if (!list_empty(&ipx_routes))
97 rc = list_entry(ipx_routes.next, struct ipx_route, node);
98 return rc;
99}
100
101static struct ipx_route *ipx_routes_next(struct ipx_route *r)
102{
103 struct ipx_route *rc = NULL;
104
105 if (r->node.next != &ipx_routes)
106 rc = list_entry(r->node.next, struct ipx_route, node);
107 return rc;
108}
109
110static __inline__ struct ipx_route *ipx_get_route_idx(loff_t pos)
111{
112 struct ipx_route *r;
113
114 list_for_each_entry(r, &ipx_routes, node)
115 if (!pos--)
116 goto out;
117 r = NULL;
118out:
119 return r;
120}
121
122static void *ipx_seq_route_start(struct seq_file *seq, loff_t *pos) 62static void *ipx_seq_route_start(struct seq_file *seq, loff_t *pos)
123{ 63{
124 loff_t l = *pos;
125 read_lock_bh(&ipx_routes_lock); 64 read_lock_bh(&ipx_routes_lock);
126 return l ? ipx_get_route_idx(--l) : SEQ_START_TOKEN; 65 return seq_list_start_head(&ipx_routes, *pos);
127} 66}
128 67
129static void *ipx_seq_route_next(struct seq_file *seq, void *v, loff_t *pos) 68static void *ipx_seq_route_next(struct seq_file *seq, void *v, loff_t *pos)
130{ 69{
131 struct ipx_route *r; 70 return seq_list_next(v, &ipx_routes, pos);
132
133 ++*pos;
134 if (v == SEQ_START_TOKEN)
135 r = ipx_routes_head();
136 else
137 r = ipx_routes_next(v);
138 return r;
139} 71}
140 72
141static void ipx_seq_route_stop(struct seq_file *seq, void *v) 73static void ipx_seq_route_stop(struct seq_file *seq, void *v)
@@ -147,11 +79,13 @@ static int ipx_seq_route_show(struct seq_file *seq, void *v)
147{ 79{
148 struct ipx_route *rt; 80 struct ipx_route *rt;
149 81
150 if (v == SEQ_START_TOKEN) { 82 if (v == &ipx_routes) {
151 seq_puts(seq, "Network Router_Net Router_Node\n"); 83 seq_puts(seq, "Network Router_Net Router_Node\n");
152 goto out; 84 goto out;
153 } 85 }
154 rt = v; 86
87 rt = list_entry(v, struct ipx_route, node);
88
155 seq_printf(seq, "%08lX ", (unsigned long int)ntohl(rt->ir_net)); 89 seq_printf(seq, "%08lX ", (unsigned long int)ntohl(rt->ir_net));
156 if (rt->ir_routed) 90 if (rt->ir_routed)
157 seq_printf(seq, "%08lX %02X%02X%02X%02X%02X%02X\n", 91 seq_printf(seq, "%08lX %02X%02X%02X%02X%02X%02X\n",
@@ -226,9 +160,9 @@ static void *ipx_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
226 spin_unlock_bh(&i->if_sklist_lock); 160 spin_unlock_bh(&i->if_sklist_lock);
227 sk = NULL; 161 sk = NULL;
228 for (;;) { 162 for (;;) {
229 i = ipx_interfaces_next(i); 163 if (i->node.next == &ipx_interfaces)
230 if (!i)
231 break; 164 break;
165 i = list_entry(i->node.next, struct ipx_interface, node);
232 spin_lock_bh(&i->if_sklist_lock); 166 spin_lock_bh(&i->if_sklist_lock);
233 if (!hlist_empty(&i->if_sklist)) { 167 if (!hlist_empty(&i->if_sklist)) {
234 sk = sk_head(&i->if_sklist); 168 sk = sk_head(&i->if_sklist);
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 811984d9324b..8b85d774e47f 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -496,9 +496,6 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
496 496
497 IRDA_DEBUG(0, "%s()\n", __func__ ); 497 IRDA_DEBUG(0, "%s()\n", __func__ );
498 498
499 if (!tty)
500 return;
501
502 IRDA_ASSERT(self != NULL, return;); 499 IRDA_ASSERT(self != NULL, return;);
503 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 500 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
504 501
@@ -1007,9 +1004,6 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
1007 IRDA_ASSERT(self != NULL, return;); 1004 IRDA_ASSERT(self != NULL, return;);
1008 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 1005 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
1009 1006
1010 if (!tty)
1011 return;
1012
1013 /* ircomm_tty_flush_buffer(tty); */ 1007 /* ircomm_tty_flush_buffer(tty); */
1014 ircomm_tty_shutdown(self); 1008 ircomm_tty_shutdown(self);
1015 1009
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 315ead3cb926..e486dc89ea59 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -1128,34 +1128,14 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
1128 */ 1128 */
1129static void *irlan_seq_start(struct seq_file *seq, loff_t *pos) 1129static void *irlan_seq_start(struct seq_file *seq, loff_t *pos)
1130{ 1130{
1131 int i = 1;
1132 struct irlan_cb *self;
1133
1134 rcu_read_lock(); 1131 rcu_read_lock();
1135 if (*pos == 0) 1132 return seq_list_start_head(&irlans, *pos);
1136 return SEQ_START_TOKEN;
1137
1138 list_for_each_entry(self, &irlans, dev_list) {
1139 if (*pos == i)
1140 return self;
1141 ++i;
1142 }
1143 return NULL;
1144} 1133}
1145 1134
1146/* Return entry after v, and increment pos */ 1135/* Return entry after v, and increment pos */
1147static void *irlan_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1136static void *irlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1148{ 1137{
1149 struct list_head *nxt; 1138 return seq_list_next(v, &irlans, pos);
1150
1151 ++*pos;
1152 if (v == SEQ_START_TOKEN)
1153 nxt = irlans.next;
1154 else
1155 nxt = ((struct irlan_cb *)v)->dev_list.next;
1156
1157 return (nxt == &irlans) ? NULL
1158 : list_entry(nxt, struct irlan_cb, dev_list);
1159} 1139}
1160 1140
1161/* End of reading /proc file */ 1141/* End of reading /proc file */
@@ -1170,10 +1150,10 @@ static void irlan_seq_stop(struct seq_file *seq, void *v)
1170 */ 1150 */
1171static int irlan_seq_show(struct seq_file *seq, void *v) 1151static int irlan_seq_show(struct seq_file *seq, void *v)
1172{ 1152{
1173 if (v == SEQ_START_TOKEN) 1153 if (v == &irlans)
1174 seq_puts(seq, "IrLAN instances:\n"); 1154 seq_puts(seq, "IrLAN instances:\n");
1175 else { 1155 else {
1176 struct irlan_cb *self = v; 1156 struct irlan_cb *self = list_entry(v, struct irlan_cb, dev_list);
1177 1157
1178 IRDA_ASSERT(self != NULL, return -1;); 1158 IRDA_ASSERT(self != NULL, return -1;);
1179 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); 1159 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index d340110f5c0c..9616c32d1076 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -321,14 +321,15 @@ static void irlan_eth_set_multicast_list(struct net_device *dev)
321 /* Enable promiscuous mode */ 321 /* Enable promiscuous mode */
322 IRDA_WARNING("Promiscuous mode not implemented by IrLAN!\n"); 322 IRDA_WARNING("Promiscuous mode not implemented by IrLAN!\n");
323 } 323 }
324 else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS) { 324 else if ((dev->flags & IFF_ALLMULTI) ||
325 netdev_mc_count(dev) > HW_MAX_ADDRS) {
325 /* Disable promiscuous mode, use normal mode. */ 326 /* Disable promiscuous mode, use normal mode. */
326 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ ); 327 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
327 /* hardware_set_filter(NULL); */ 328 /* hardware_set_filter(NULL); */
328 329
329 irlan_set_multicast_filter(self, TRUE); 330 irlan_set_multicast_filter(self, TRUE);
330 } 331 }
331 else if (dev->mc_count) { 332 else if (!netdev_mc_empty(dev)) {
332 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ ); 333 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
333 /* Walk the address list, and load the filter */ 334 /* Walk the address list, and load the filter */
334 /* hardware_set_filter(dev->mc_list); */ 335 /* hardware_set_filter(dev->mc_list); */
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 156020d138b5..6b3602de359a 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -698,15 +698,18 @@ dev_irnet_ioctl(
698 698
699 /* Query PPP channel and unit number */ 699 /* Query PPP channel and unit number */
700 case PPPIOCGCHAN: 700 case PPPIOCGCHAN:
701 lock_kernel();
701 if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan), 702 if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan),
702 (int __user *)argp)) 703 (int __user *)argp))
703 err = 0; 704 err = 0;
705 unlock_kernel();
704 break; 706 break;
705 case PPPIOCGUNIT: 707 case PPPIOCGUNIT:
706 lock_kernel(); 708 lock_kernel();
707 if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan), 709 if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan),
708 (int __user *)argp)) 710 (int __user *)argp))
709 err = 0; 711 err = 0;
712 unlock_kernel();
710 break; 713 break;
711 714
712 /* All these ioctls can be passed both directly and from ppp_generic, 715 /* All these ioctls can be passed both directly and from ppp_generic,
diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c
index 476b307bd801..69b5b75f5431 100644
--- a/net/irda/irnetlink.c
+++ b/net/irda/irnetlink.c
@@ -124,7 +124,7 @@ static int irda_nl_get_mode(struct sk_buff *skb, struct genl_info *info)
124 return ret; 124 return ret;
125} 125}
126 126
127static struct nla_policy irda_nl_policy[IRDA_NL_ATTR_MAX + 1] = { 127static const struct nla_policy irda_nl_policy[IRDA_NL_ATTR_MAX + 1] = {
128 [IRDA_NL_ATTR_IFNAME] = { .type = NLA_NUL_STRING, 128 [IRDA_NL_ATTR_IFNAME] = { .type = NLA_NUL_STRING,
129 .len = IFNAMSIZ-1 }, 129 .len = IFNAMSIZ-1 },
130 [IRDA_NL_ATTR_MODE] = { .type = NLA_U32 }, 130 [IRDA_NL_ATTR_MODE] = { .type = NLA_U32 },
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 76fa6fef6473..344145f23c34 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -41,10 +41,10 @@ struct netns_pfkey {
41 struct hlist_head table; 41 struct hlist_head table;
42 atomic_t socks_nr; 42 atomic_t socks_nr;
43}; 43};
44static DECLARE_WAIT_QUEUE_HEAD(pfkey_table_wait); 44static DEFINE_MUTEX(pfkey_mutex);
45static DEFINE_RWLOCK(pfkey_table_lock);
46static atomic_t pfkey_table_users = ATOMIC_INIT(0);
47 45
46#define DUMMY_MARK 0
47static struct xfrm_mark dummy_mark = {0, 0};
48struct pfkey_sock { 48struct pfkey_sock {
49 /* struct sock must be the first member of struct pfkey_sock */ 49 /* struct sock must be the first member of struct pfkey_sock */
50 struct sock sk; 50 struct sock sk;
@@ -108,50 +108,6 @@ static void pfkey_sock_destruct(struct sock *sk)
108 atomic_dec(&net_pfkey->socks_nr); 108 atomic_dec(&net_pfkey->socks_nr);
109} 109}
110 110
111static void pfkey_table_grab(void)
112{
113 write_lock_bh(&pfkey_table_lock);
114
115 if (atomic_read(&pfkey_table_users)) {
116 DECLARE_WAITQUEUE(wait, current);
117
118 add_wait_queue_exclusive(&pfkey_table_wait, &wait);
119 for(;;) {
120 set_current_state(TASK_UNINTERRUPTIBLE);
121 if (atomic_read(&pfkey_table_users) == 0)
122 break;
123 write_unlock_bh(&pfkey_table_lock);
124 schedule();
125 write_lock_bh(&pfkey_table_lock);
126 }
127
128 __set_current_state(TASK_RUNNING);
129 remove_wait_queue(&pfkey_table_wait, &wait);
130 }
131}
132
133static __inline__ void pfkey_table_ungrab(void)
134{
135 write_unlock_bh(&pfkey_table_lock);
136 wake_up(&pfkey_table_wait);
137}
138
139static __inline__ void pfkey_lock_table(void)
140{
141 /* read_lock() synchronizes us to pfkey_table_grab */
142
143 read_lock(&pfkey_table_lock);
144 atomic_inc(&pfkey_table_users);
145 read_unlock(&pfkey_table_lock);
146}
147
148static __inline__ void pfkey_unlock_table(void)
149{
150 if (atomic_dec_and_test(&pfkey_table_users))
151 wake_up(&pfkey_table_wait);
152}
153
154
155static const struct proto_ops pfkey_ops; 111static const struct proto_ops pfkey_ops;
156 112
157static void pfkey_insert(struct sock *sk) 113static void pfkey_insert(struct sock *sk)
@@ -159,16 +115,16 @@ static void pfkey_insert(struct sock *sk)
159 struct net *net = sock_net(sk); 115 struct net *net = sock_net(sk);
160 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); 116 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
161 117
162 pfkey_table_grab(); 118 mutex_lock(&pfkey_mutex);
163 sk_add_node(sk, &net_pfkey->table); 119 sk_add_node_rcu(sk, &net_pfkey->table);
164 pfkey_table_ungrab(); 120 mutex_unlock(&pfkey_mutex);
165} 121}
166 122
167static void pfkey_remove(struct sock *sk) 123static void pfkey_remove(struct sock *sk)
168{ 124{
169 pfkey_table_grab(); 125 mutex_lock(&pfkey_mutex);
170 sk_del_node_init(sk); 126 sk_del_node_init_rcu(sk);
171 pfkey_table_ungrab(); 127 mutex_unlock(&pfkey_mutex);
172} 128}
173 129
174static struct proto key_proto = { 130static struct proto key_proto = {
@@ -223,6 +179,8 @@ static int pfkey_release(struct socket *sock)
223 sock_orphan(sk); 179 sock_orphan(sk);
224 sock->sk = NULL; 180 sock->sk = NULL;
225 skb_queue_purge(&sk->sk_write_queue); 181 skb_queue_purge(&sk->sk_write_queue);
182
183 synchronize_rcu();
226 sock_put(sk); 184 sock_put(sk);
227 185
228 return 0; 186 return 0;
@@ -277,8 +235,8 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
277 if (!skb) 235 if (!skb)
278 return -ENOMEM; 236 return -ENOMEM;
279 237
280 pfkey_lock_table(); 238 rcu_read_lock();
281 sk_for_each(sk, node, &net_pfkey->table) { 239 sk_for_each_rcu(sk, node, &net_pfkey->table) {
282 struct pfkey_sock *pfk = pfkey_sk(sk); 240 struct pfkey_sock *pfk = pfkey_sk(sk);
283 int err2; 241 int err2;
284 242
@@ -309,7 +267,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
309 if ((broadcast_flags & BROADCAST_REGISTERED) && err) 267 if ((broadcast_flags & BROADCAST_REGISTERED) && err)
310 err = err2; 268 err = err2;
311 } 269 }
312 pfkey_unlock_table(); 270 rcu_read_unlock();
313 271
314 if (one_sk != NULL) 272 if (one_sk != NULL)
315 err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk); 273 err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
@@ -691,7 +649,7 @@ static struct xfrm_state *pfkey_xfrm_state_lookup(struct net *net, struct sadb_
691 if (!xaddr) 649 if (!xaddr)
692 return NULL; 650 return NULL;
693 651
694 return xfrm_state_lookup(net, xaddr, sa->sadb_sa_spi, proto, family); 652 return xfrm_state_lookup(net, DUMMY_MARK, xaddr, sa->sadb_sa_spi, proto, family);
695} 653}
696 654
697#define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1))) 655#define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1)))
@@ -1360,7 +1318,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
1360 } 1318 }
1361 1319
1362 if (hdr->sadb_msg_seq) { 1320 if (hdr->sadb_msg_seq) {
1363 x = xfrm_find_acq_byseq(net, hdr->sadb_msg_seq); 1321 x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq);
1364 if (x && xfrm_addr_cmp(&x->id.daddr, xdaddr, family)) { 1322 if (x && xfrm_addr_cmp(&x->id.daddr, xdaddr, family)) {
1365 xfrm_state_put(x); 1323 xfrm_state_put(x);
1366 x = NULL; 1324 x = NULL;
@@ -1368,7 +1326,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
1368 } 1326 }
1369 1327
1370 if (!x) 1328 if (!x)
1371 x = xfrm_find_acq(net, mode, reqid, proto, xdaddr, xsaddr, 1, family); 1329 x = xfrm_find_acq(net, &dummy_mark, mode, reqid, proto, xdaddr, xsaddr, 1, family);
1372 1330
1373 if (x == NULL) 1331 if (x == NULL)
1374 return -ENOENT; 1332 return -ENOENT;
@@ -1417,7 +1375,7 @@ static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, struct sadb_msg *
1417 if (hdr->sadb_msg_seq == 0 || hdr->sadb_msg_errno == 0) 1375 if (hdr->sadb_msg_seq == 0 || hdr->sadb_msg_errno == 0)
1418 return 0; 1376 return 0;
1419 1377
1420 x = xfrm_find_acq_byseq(net, hdr->sadb_msg_seq); 1378 x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq);
1421 if (x == NULL) 1379 if (x == NULL)
1422 return 0; 1380 return 0;
1423 1381
@@ -1712,6 +1670,23 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, struct sadb_msg
1712 return 0; 1670 return 0;
1713} 1671}
1714 1672
1673static int unicast_flush_resp(struct sock *sk, struct sadb_msg *ihdr)
1674{
1675 struct sk_buff *skb;
1676 struct sadb_msg *hdr;
1677
1678 skb = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
1679 if (!skb)
1680 return -ENOBUFS;
1681
1682 hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
1683 memcpy(hdr, ihdr, sizeof(struct sadb_msg));
1684 hdr->sadb_msg_errno = (uint8_t) 0;
1685 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
1686
1687 return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
1688}
1689
1715static int key_notify_sa_flush(struct km_event *c) 1690static int key_notify_sa_flush(struct km_event *c)
1716{ 1691{
1717 struct sk_buff *skb; 1692 struct sk_buff *skb;
@@ -1740,7 +1715,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hd
1740 unsigned proto; 1715 unsigned proto;
1741 struct km_event c; 1716 struct km_event c;
1742 struct xfrm_audit audit_info; 1717 struct xfrm_audit audit_info;
1743 int err; 1718 int err, err2;
1744 1719
1745 proto = pfkey_satype2proto(hdr->sadb_msg_satype); 1720 proto = pfkey_satype2proto(hdr->sadb_msg_satype);
1746 if (proto == 0) 1721 if (proto == 0)
@@ -1750,8 +1725,13 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hd
1750 audit_info.sessionid = audit_get_sessionid(current); 1725 audit_info.sessionid = audit_get_sessionid(current);
1751 audit_info.secid = 0; 1726 audit_info.secid = 0;
1752 err = xfrm_state_flush(net, proto, &audit_info); 1727 err = xfrm_state_flush(net, proto, &audit_info);
1753 if (err) 1728 err2 = unicast_flush_resp(sk, hdr);
1754 return err; 1729 if (err || err2) {
1730 if (err == -ESRCH) /* empty table - go quietly */
1731 err = 0;
1732 return err ? err : err2;
1733 }
1734
1755 c.data.proto = proto; 1735 c.data.proto = proto;
1756 c.seq = hdr->sadb_msg_seq; 1736 c.seq = hdr->sadb_msg_seq;
1757 c.pid = hdr->sadb_msg_pid; 1737 c.pid = hdr->sadb_msg_pid;
@@ -2149,10 +2129,9 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c
2149 int err; 2129 int err;
2150 2130
2151 out_skb = pfkey_xfrm_policy2msg_prep(xp); 2131 out_skb = pfkey_xfrm_policy2msg_prep(xp);
2152 if (IS_ERR(out_skb)) { 2132 if (IS_ERR(out_skb))
2153 err = PTR_ERR(out_skb); 2133 return PTR_ERR(out_skb);
2154 goto out; 2134
2155 }
2156 err = pfkey_xfrm_policy2msg(out_skb, xp, dir); 2135 err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
2157 if (err < 0) 2136 if (err < 0)
2158 return err; 2137 return err;
@@ -2168,7 +2147,6 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c
2168 out_hdr->sadb_msg_seq = c->seq; 2147 out_hdr->sadb_msg_seq = c->seq;
2169 out_hdr->sadb_msg_pid = c->pid; 2148 out_hdr->sadb_msg_pid = c->pid;
2170 pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp)); 2149 pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
2171out:
2172 return 0; 2150 return 0;
2173 2151
2174} 2152}
@@ -2346,7 +2324,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg
2346 return err; 2324 return err;
2347 } 2325 }
2348 2326
2349 xp = xfrm_policy_bysel_ctx(net, XFRM_POLICY_TYPE_MAIN, 2327 xp = xfrm_policy_bysel_ctx(net, DUMMY_MARK, XFRM_POLICY_TYPE_MAIN,
2350 pol->sadb_x_policy_dir - 1, &sel, pol_ctx, 2328 pol->sadb_x_policy_dir - 1, &sel, pol_ctx,
2351 1, &err); 2329 1, &err);
2352 security_xfrm_policy_free(pol_ctx); 2330 security_xfrm_policy_free(pol_ctx);
@@ -2594,8 +2572,8 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
2594 return -EINVAL; 2572 return -EINVAL;
2595 2573
2596 delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2); 2574 delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2);
2597 xp = xfrm_policy_byid(net, XFRM_POLICY_TYPE_MAIN, dir, 2575 xp = xfrm_policy_byid(net, DUMMY_MARK, XFRM_POLICY_TYPE_MAIN,
2598 pol->sadb_x_policy_id, delete, &err); 2576 dir, pol->sadb_x_policy_id, delete, &err);
2599 if (xp == NULL) 2577 if (xp == NULL)
2600 return -ENOENT; 2578 return -ENOENT;
2601 2579
@@ -2706,14 +2684,19 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, struct sadb_msg
2706 struct net *net = sock_net(sk); 2684 struct net *net = sock_net(sk);
2707 struct km_event c; 2685 struct km_event c;
2708 struct xfrm_audit audit_info; 2686 struct xfrm_audit audit_info;
2709 int err; 2687 int err, err2;
2710 2688
2711 audit_info.loginuid = audit_get_loginuid(current); 2689 audit_info.loginuid = audit_get_loginuid(current);
2712 audit_info.sessionid = audit_get_sessionid(current); 2690 audit_info.sessionid = audit_get_sessionid(current);
2713 audit_info.secid = 0; 2691 audit_info.secid = 0;
2714 err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info); 2692 err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
2715 if (err) 2693 err2 = unicast_flush_resp(sk, hdr);
2694 if (err || err2) {
2695 if (err == -ESRCH) /* empty table - old silent behavior */
2696 return 0;
2716 return err; 2697 return err;
2698 }
2699
2717 c.data.type = XFRM_POLICY_TYPE_MAIN; 2700 c.data.type = XFRM_POLICY_TYPE_MAIN;
2718 c.event = XFRM_MSG_FLUSHPOLICY; 2701 c.event = XFRM_MSG_FLUSHPOLICY;
2719 c.pid = hdr->sadb_msg_pid; 2702 c.pid = hdr->sadb_msg_pid;
@@ -3019,12 +3002,11 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_e
3019static u32 get_acqseq(void) 3002static u32 get_acqseq(void)
3020{ 3003{
3021 u32 res; 3004 u32 res;
3022 static u32 acqseq; 3005 static atomic_t acqseq;
3023 static DEFINE_SPINLOCK(acqseq_lock);
3024 3006
3025 spin_lock_bh(&acqseq_lock); 3007 do {
3026 res = (++acqseq ? : ++acqseq); 3008 res = atomic_inc_return(&acqseq);
3027 spin_unlock_bh(&acqseq_lock); 3009 } while (!res);
3028 return res; 3010 return res;
3029} 3011}
3030 3012
@@ -3655,9 +3637,8 @@ static const struct net_proto_family pfkey_family_ops = {
3655#ifdef CONFIG_PROC_FS 3637#ifdef CONFIG_PROC_FS
3656static int pfkey_seq_show(struct seq_file *f, void *v) 3638static int pfkey_seq_show(struct seq_file *f, void *v)
3657{ 3639{
3658 struct sock *s; 3640 struct sock *s = sk_entry(v);
3659 3641
3660 s = (struct sock *)v;
3661 if (v == SEQ_START_TOKEN) 3642 if (v == SEQ_START_TOKEN)
3662 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n"); 3643 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
3663 else 3644 else
@@ -3676,19 +3657,9 @@ static void *pfkey_seq_start(struct seq_file *f, loff_t *ppos)
3676{ 3657{
3677 struct net *net = seq_file_net(f); 3658 struct net *net = seq_file_net(f);
3678 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); 3659 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
3679 struct sock *s;
3680 struct hlist_node *node;
3681 loff_t pos = *ppos;
3682
3683 read_lock(&pfkey_table_lock);
3684 if (pos == 0)
3685 return SEQ_START_TOKEN;
3686 3660
3687 sk_for_each(s, node, &net_pfkey->table) 3661 rcu_read_lock();
3688 if (pos-- == 1) 3662 return seq_hlist_start_head_rcu(&net_pfkey->table, *ppos);
3689 return s;
3690
3691 return NULL;
3692} 3663}
3693 3664
3694static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos) 3665static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos)
@@ -3696,15 +3667,12 @@ static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos)
3696 struct net *net = seq_file_net(f); 3667 struct net *net = seq_file_net(f);
3697 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); 3668 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
3698 3669
3699 ++*ppos; 3670 return seq_hlist_next_rcu(v, &net_pfkey->table, ppos);
3700 return (v == SEQ_START_TOKEN) ?
3701 sk_head(&net_pfkey->table) :
3702 sk_next((struct sock *)v);
3703} 3671}
3704 3672
3705static void pfkey_seq_stop(struct seq_file *f, void *v) 3673static void pfkey_seq_stop(struct seq_file *f, void *v)
3706{ 3674{
3707 read_unlock(&pfkey_table_lock); 3675 rcu_read_unlock();
3708} 3676}
3709 3677
3710static const struct seq_operations pfkey_seq_ops = { 3678static const struct seq_operations pfkey_seq_ops = {
@@ -3738,17 +3706,17 @@ static int __net_init pfkey_init_proc(struct net *net)
3738 return 0; 3706 return 0;
3739} 3707}
3740 3708
3741static void pfkey_exit_proc(struct net *net) 3709static void __net_exit pfkey_exit_proc(struct net *net)
3742{ 3710{
3743 proc_net_remove(net, "pfkey"); 3711 proc_net_remove(net, "pfkey");
3744} 3712}
3745#else 3713#else
3746static int __net_init pfkey_init_proc(struct net *net) 3714static inline int pfkey_init_proc(struct net *net)
3747{ 3715{
3748 return 0; 3716 return 0;
3749} 3717}
3750 3718
3751static void pfkey_exit_proc(struct net *net) 3719static inline void pfkey_exit_proc(struct net *net)
3752{ 3720{
3753} 3721}
3754#endif 3722#endif
@@ -3794,9 +3762,9 @@ static struct pernet_operations pfkey_net_ops = {
3794 3762
3795static void __exit ipsec_pfkey_exit(void) 3763static void __exit ipsec_pfkey_exit(void)
3796{ 3764{
3797 unregister_pernet_subsys(&pfkey_net_ops);
3798 xfrm_unregister_km(&pfkeyv2_mgr); 3765 xfrm_unregister_km(&pfkeyv2_mgr);
3799 sock_unregister(PF_KEY); 3766 sock_unregister(PF_KEY);
3767 unregister_pernet_subsys(&pfkey_net_ops);
3800 proto_unregister(&key_proto); 3768 proto_unregister(&key_proto);
3801} 3769}
3802 3770
@@ -3807,21 +3775,22 @@ static int __init ipsec_pfkey_init(void)
3807 if (err != 0) 3775 if (err != 0)
3808 goto out; 3776 goto out;
3809 3777
3810 err = sock_register(&pfkey_family_ops); 3778 err = register_pernet_subsys(&pfkey_net_ops);
3811 if (err != 0) 3779 if (err != 0)
3812 goto out_unregister_key_proto; 3780 goto out_unregister_key_proto;
3781 err = sock_register(&pfkey_family_ops);
3782 if (err != 0)
3783 goto out_unregister_pernet;
3813 err = xfrm_register_km(&pfkeyv2_mgr); 3784 err = xfrm_register_km(&pfkeyv2_mgr);
3814 if (err != 0) 3785 if (err != 0)
3815 goto out_sock_unregister; 3786 goto out_sock_unregister;
3816 err = register_pernet_subsys(&pfkey_net_ops);
3817 if (err != 0)
3818 goto out_xfrm_unregister_km;
3819out: 3787out:
3820 return err; 3788 return err;
3821out_xfrm_unregister_km: 3789
3822 xfrm_unregister_km(&pfkeyv2_mgr);
3823out_sock_unregister: 3790out_sock_unregister:
3824 sock_unregister(PF_KEY); 3791 sock_unregister(PF_KEY);
3792out_unregister_pernet:
3793 unregister_pernet_subsys(&pfkey_net_ops);
3825out_unregister_key_proto: 3794out_unregister_key_proto:
3826 proto_unregister(&key_proto); 3795 proto_unregister(&key_proto);
3827 goto out; 3796 goto out;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 3a66546cad06..e35d907fba2c 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -47,6 +47,10 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout);
47#define dprintk(args...) 47#define dprintk(args...)
48#endif 48#endif
49 49
50/* Maybe we'll add some more in the future. */
51#define LLC_CMSG_PKTINFO 1
52
53
50/** 54/**
51 * llc_ui_next_link_no - return the next unused link number for a sap 55 * llc_ui_next_link_no - return the next unused link number for a sap
52 * @sap: Address of sap to get link number from. 56 * @sap: Address of sap to get link number from.
@@ -136,6 +140,7 @@ static struct proto llc_proto = {
136 .name = "LLC", 140 .name = "LLC",
137 .owner = THIS_MODULE, 141 .owner = THIS_MODULE,
138 .obj_size = sizeof(struct llc_sock), 142 .obj_size = sizeof(struct llc_sock),
143 .slab_flags = SLAB_DESTROY_BY_RCU,
139}; 144};
140 145
141/** 146/**
@@ -192,10 +197,8 @@ static int llc_ui_release(struct socket *sock)
192 llc->laddr.lsap, llc->daddr.lsap); 197 llc->laddr.lsap, llc->daddr.lsap);
193 if (!llc_send_disc(sk)) 198 if (!llc_send_disc(sk))
194 llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); 199 llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
195 if (!sock_flag(sk, SOCK_ZAPPED)) { 200 if (!sock_flag(sk, SOCK_ZAPPED))
196 llc_sap_put(llc->sap);
197 llc_sap_remove_socket(llc->sap, sk); 201 llc_sap_remove_socket(llc->sap, sk);
198 }
199 release_sock(sk); 202 release_sock(sk);
200 if (llc->dev) 203 if (llc->dev)
201 dev_put(llc->dev); 204 dev_put(llc->dev);
@@ -255,7 +258,14 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
255 if (!sock_flag(sk, SOCK_ZAPPED)) 258 if (!sock_flag(sk, SOCK_ZAPPED))
256 goto out; 259 goto out;
257 rc = -ENODEV; 260 rc = -ENODEV;
258 llc->dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd); 261 if (sk->sk_bound_dev_if) {
262 llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
263 if (llc->dev && addr->sllc_arphrd != llc->dev->type) {
264 dev_put(llc->dev);
265 llc->dev = NULL;
266 }
267 } else
268 llc->dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd);
259 if (!llc->dev) 269 if (!llc->dev)
260 goto out; 270 goto out;
261 rc = -EUSERS; 271 rc = -EUSERS;
@@ -306,7 +316,25 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
306 goto out; 316 goto out;
307 rc = -ENODEV; 317 rc = -ENODEV;
308 rtnl_lock(); 318 rtnl_lock();
309 llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd, addr->sllc_mac); 319 if (sk->sk_bound_dev_if) {
320 llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
321 if (llc->dev) {
322 if (!addr->sllc_arphrd)
323 addr->sllc_arphrd = llc->dev->type;
324 if (llc_mac_null(addr->sllc_mac))
325 memcpy(addr->sllc_mac, llc->dev->dev_addr,
326 IFHWADDRLEN);
327 if (addr->sllc_arphrd != llc->dev->type ||
328 !llc_mac_match(addr->sllc_mac,
329 llc->dev->dev_addr)) {
330 rc = -EINVAL;
331 dev_put(llc->dev);
332 llc->dev = NULL;
333 }
334 }
335 } else
336 llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd,
337 addr->sllc_mac);
310 rtnl_unlock(); 338 rtnl_unlock();
311 if (!llc->dev) 339 if (!llc->dev)
312 goto out; 340 goto out;
@@ -322,7 +350,6 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
322 rc = -EBUSY; /* some other network layer is using the sap */ 350 rc = -EBUSY; /* some other network layer is using the sap */
323 if (!sap) 351 if (!sap)
324 goto out; 352 goto out;
325 llc_sap_hold(sap);
326 } else { 353 } else {
327 struct llc_addr laddr, daddr; 354 struct llc_addr laddr, daddr;
328 struct sock *ask; 355 struct sock *ask;
@@ -591,6 +618,20 @@ static int llc_wait_data(struct sock *sk, long timeo)
591 return rc; 618 return rc;
592} 619}
593 620
621static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
622{
623 struct llc_sock *llc = llc_sk(skb->sk);
624
625 if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
626 struct llc_pktinfo info;
627
628 info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
629 llc_pdu_decode_dsap(skb, &info.lpi_sap);
630 llc_pdu_decode_da(skb, info.lpi_mac);
631 put_cmsg(msg, SOL_LLC, LLC_OPT_PKTINFO, sizeof(info), &info);
632 }
633}
634
594/** 635/**
595 * llc_ui_accept - accept a new incoming connection. 636 * llc_ui_accept - accept a new incoming connection.
596 * @sock: Socket which connections arrive on. 637 * @sock: Socket which connections arrive on.
@@ -812,6 +853,8 @@ copy_uaddr:
812 memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr)); 853 memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
813 msg->msg_namelen = sizeof(*uaddr); 854 msg->msg_namelen = sizeof(*uaddr);
814 } 855 }
856 if (llc_sk(sk)->cmsg_flags)
857 llc_cmsg_rcv(msg, skb);
815 goto out; 858 goto out;
816} 859}
817 860
@@ -1030,6 +1073,12 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
1030 goto out; 1073 goto out;
1031 llc->rw = opt; 1074 llc->rw = opt;
1032 break; 1075 break;
1076 case LLC_OPT_PKTINFO:
1077 if (opt)
1078 llc->cmsg_flags |= LLC_CMSG_PKTINFO;
1079 else
1080 llc->cmsg_flags &= ~LLC_CMSG_PKTINFO;
1081 break;
1033 default: 1082 default:
1034 rc = -ENOPROTOOPT; 1083 rc = -ENOPROTOOPT;
1035 goto out; 1084 goto out;
@@ -1083,6 +1132,9 @@ static int llc_ui_getsockopt(struct socket *sock, int level, int optname,
1083 val = llc->k; break; 1132 val = llc->k; break;
1084 case LLC_OPT_RX_WIN: 1133 case LLC_OPT_RX_WIN:
1085 val = llc->rw; break; 1134 val = llc->rw; break;
1135 case LLC_OPT_PKTINFO:
1136 val = (llc->cmsg_flags & LLC_CMSG_PKTINFO) != 0;
1137 break;
1086 default: 1138 default:
1087 rc = -ENOPROTOOPT; 1139 rc = -ENOPROTOOPT;
1088 goto out; 1140 goto out;
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index 019c780512e8..86d6985b9d49 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -1437,7 +1437,7 @@ static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb)
1437 llc_conn_state_process(sk, skb); 1437 llc_conn_state_process(sk, skb);
1438 else { 1438 else {
1439 llc_set_backlog_type(skb, LLC_EVENT); 1439 llc_set_backlog_type(skb, LLC_EVENT);
1440 sk_add_backlog(sk, skb); 1440 __sk_add_backlog(sk, skb);
1441 } 1441 }
1442 } 1442 }
1443} 1443}
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index c6bab39b018e..a12144da7974 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -468,6 +468,19 @@ static int llc_exec_conn_trans_actions(struct sock *sk,
468 return rc; 468 return rc;
469} 469}
470 470
471static inline bool llc_estab_match(const struct llc_sap *sap,
472 const struct llc_addr *daddr,
473 const struct llc_addr *laddr,
474 const struct sock *sk)
475{
476 struct llc_sock *llc = llc_sk(sk);
477
478 return llc->laddr.lsap == laddr->lsap &&
479 llc->daddr.lsap == daddr->lsap &&
480 llc_mac_match(llc->laddr.mac, laddr->mac) &&
481 llc_mac_match(llc->daddr.mac, daddr->mac);
482}
483
471/** 484/**
472 * __llc_lookup_established - Finds connection for the remote/local sap/mac 485 * __llc_lookup_established - Finds connection for the remote/local sap/mac
473 * @sap: SAP 486 * @sap: SAP
@@ -484,23 +497,35 @@ static struct sock *__llc_lookup_established(struct llc_sap *sap,
484 struct llc_addr *laddr) 497 struct llc_addr *laddr)
485{ 498{
486 struct sock *rc; 499 struct sock *rc;
487 struct hlist_node *node; 500 struct hlist_nulls_node *node;
488 501 int slot = llc_sk_laddr_hashfn(sap, laddr);
489 read_lock(&sap->sk_list.lock); 502 struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
490 sk_for_each(rc, node, &sap->sk_list.list) { 503
491 struct llc_sock *llc = llc_sk(rc); 504 rcu_read_lock();
492 505again:
493 if (llc->laddr.lsap == laddr->lsap && 506 sk_nulls_for_each_rcu(rc, node, laddr_hb) {
494 llc->daddr.lsap == daddr->lsap && 507 if (llc_estab_match(sap, daddr, laddr, rc)) {
495 llc_mac_match(llc->laddr.mac, laddr->mac) && 508 /* Extra checks required by SLAB_DESTROY_BY_RCU */
496 llc_mac_match(llc->daddr.mac, daddr->mac)) { 509 if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
497 sock_hold(rc); 510 goto again;
511 if (unlikely(llc_sk(rc)->sap != sap ||
512 !llc_estab_match(sap, daddr, laddr, rc))) {
513 sock_put(rc);
514 continue;
515 }
498 goto found; 516 goto found;
499 } 517 }
500 } 518 }
501 rc = NULL; 519 rc = NULL;
520 /*
521 * if the nulls value we got at the end of this lookup is
522 * not the expected one, we must restart lookup.
523 * We probably met an item that was moved to another chain.
524 */
525 if (unlikely(get_nulls_value(node) != slot))
526 goto again;
502found: 527found:
503 read_unlock(&sap->sk_list.lock); 528 rcu_read_unlock();
504 return rc; 529 return rc;
505} 530}
506 531
@@ -516,6 +541,53 @@ struct sock *llc_lookup_established(struct llc_sap *sap,
516 return sk; 541 return sk;
517} 542}
518 543
544static inline bool llc_listener_match(const struct llc_sap *sap,
545 const struct llc_addr *laddr,
546 const struct sock *sk)
547{
548 struct llc_sock *llc = llc_sk(sk);
549
550 return sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN &&
551 llc->laddr.lsap == laddr->lsap &&
552 llc_mac_match(llc->laddr.mac, laddr->mac);
553}
554
555static struct sock *__llc_lookup_listener(struct llc_sap *sap,
556 struct llc_addr *laddr)
557{
558 struct sock *rc;
559 struct hlist_nulls_node *node;
560 int slot = llc_sk_laddr_hashfn(sap, laddr);
561 struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
562
563 rcu_read_lock();
564again:
565 sk_nulls_for_each_rcu(rc, node, laddr_hb) {
566 if (llc_listener_match(sap, laddr, rc)) {
567 /* Extra checks required by SLAB_DESTROY_BY_RCU */
568 if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
569 goto again;
570 if (unlikely(llc_sk(rc)->sap != sap ||
571 !llc_listener_match(sap, laddr, rc))) {
572 sock_put(rc);
573 continue;
574 }
575 goto found;
576 }
577 }
578 rc = NULL;
579 /*
580 * if the nulls value we got at the end of this lookup is
581 * not the expected one, we must restart lookup.
582 * We probably met an item that was moved to another chain.
583 */
584 if (unlikely(get_nulls_value(node) != slot))
585 goto again;
586found:
587 rcu_read_unlock();
588 return rc;
589}
590
519/** 591/**
520 * llc_lookup_listener - Finds listener for local MAC + SAP 592 * llc_lookup_listener - Finds listener for local MAC + SAP
521 * @sap: SAP 593 * @sap: SAP
@@ -529,24 +601,12 @@ struct sock *llc_lookup_established(struct llc_sap *sap,
529static struct sock *llc_lookup_listener(struct llc_sap *sap, 601static struct sock *llc_lookup_listener(struct llc_sap *sap,
530 struct llc_addr *laddr) 602 struct llc_addr *laddr)
531{ 603{
532 struct sock *rc; 604 static struct llc_addr null_addr;
533 struct hlist_node *node; 605 struct sock *rc = __llc_lookup_listener(sap, laddr);
534 606
535 read_lock(&sap->sk_list.lock); 607 if (!rc)
536 sk_for_each(rc, node, &sap->sk_list.list) { 608 rc = __llc_lookup_listener(sap, &null_addr);
537 struct llc_sock *llc = llc_sk(rc);
538 609
539 if (rc->sk_type == SOCK_STREAM && rc->sk_state == TCP_LISTEN &&
540 llc->laddr.lsap == laddr->lsap &&
541 (llc_mac_match(llc->laddr.mac, laddr->mac) ||
542 llc_mac_null(llc->laddr.mac))) {
543 sock_hold(rc);
544 goto found;
545 }
546 }
547 rc = NULL;
548found:
549 read_unlock(&sap->sk_list.lock);
550 return rc; 610 return rc;
551} 611}
552 612
@@ -647,15 +707,22 @@ static int llc_find_offset(int state, int ev_type)
647 * @sap: SAP 707 * @sap: SAP
648 * @sk: socket 708 * @sk: socket
649 * 709 *
650 * This function adds a socket to sk_list of a SAP. 710 * This function adds a socket to the hash tables of a SAP.
651 */ 711 */
652void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk) 712void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
653{ 713{
714 struct llc_sock *llc = llc_sk(sk);
715 struct hlist_head *dev_hb = llc_sk_dev_hash(sap, llc->dev->ifindex);
716 struct hlist_nulls_head *laddr_hb = llc_sk_laddr_hash(sap, &llc->laddr);
717
654 llc_sap_hold(sap); 718 llc_sap_hold(sap);
655 write_lock_bh(&sap->sk_list.lock);
656 llc_sk(sk)->sap = sap; 719 llc_sk(sk)->sap = sap;
657 sk_add_node(sk, &sap->sk_list.list); 720
658 write_unlock_bh(&sap->sk_list.lock); 721 spin_lock_bh(&sap->sk_lock);
722 sap->sk_count++;
723 sk_nulls_add_node_rcu(sk, laddr_hb);
724 hlist_add_head(&llc->dev_hash_node, dev_hb);
725 spin_unlock_bh(&sap->sk_lock);
659} 726}
660 727
661/** 728/**
@@ -663,14 +730,18 @@ void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
663 * @sap: SAP 730 * @sap: SAP
664 * @sk: socket 731 * @sk: socket
665 * 732 *
666 * This function removes a connection from sk_list.list of a SAP if 733 * This function removes a connection from the hash tables of a SAP if
667 * the connection was in this list. 734 * the connection was in this list.
668 */ 735 */
669void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk) 736void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk)
670{ 737{
671 write_lock_bh(&sap->sk_list.lock); 738 struct llc_sock *llc = llc_sk(sk);
672 sk_del_node_init(sk); 739
673 write_unlock_bh(&sap->sk_list.lock); 740 spin_lock_bh(&sap->sk_lock);
741 sk_nulls_del_node_init_rcu(sk);
742 hlist_del(&llc->dev_hash_node);
743 sap->sk_count--;
744 spin_unlock_bh(&sap->sk_lock);
674 llc_sap_put(sap); 745 llc_sap_put(sap);
675} 746}
676 747
@@ -756,7 +827,8 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
756 else { 827 else {
757 dprintk("%s: adding to backlog...\n", __func__); 828 dprintk("%s: adding to backlog...\n", __func__);
758 llc_set_backlog_type(skb, LLC_PACKET); 829 llc_set_backlog_type(skb, LLC_PACKET);
759 sk_add_backlog(sk, skb); 830 if (sk_add_backlog(sk, skb))
831 goto drop_unlock;
760 } 832 }
761out: 833out:
762 bh_unlock_sock(sk); 834 bh_unlock_sock(sk);
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index ff4c0ab96a69..78167e81dfeb 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -23,7 +23,7 @@
23#include <net/llc.h> 23#include <net/llc.h>
24 24
25LIST_HEAD(llc_sap_list); 25LIST_HEAD(llc_sap_list);
26DEFINE_RWLOCK(llc_sap_list_lock); 26DEFINE_SPINLOCK(llc_sap_list_lock);
27 27
28/** 28/**
29 * llc_sap_alloc - allocates and initializes sap. 29 * llc_sap_alloc - allocates and initializes sap.
@@ -33,40 +33,19 @@ DEFINE_RWLOCK(llc_sap_list_lock);
33static struct llc_sap *llc_sap_alloc(void) 33static struct llc_sap *llc_sap_alloc(void)
34{ 34{
35 struct llc_sap *sap = kzalloc(sizeof(*sap), GFP_ATOMIC); 35 struct llc_sap *sap = kzalloc(sizeof(*sap), GFP_ATOMIC);
36 int i;
36 37
37 if (sap) { 38 if (sap) {
38 /* sap->laddr.mac - leave as a null, it's filled by bind */ 39 /* sap->laddr.mac - leave as a null, it's filled by bind */
39 sap->state = LLC_SAP_STATE_ACTIVE; 40 sap->state = LLC_SAP_STATE_ACTIVE;
40 rwlock_init(&sap->sk_list.lock); 41 spin_lock_init(&sap->sk_lock);
42 for (i = 0; i < LLC_SK_LADDR_HASH_ENTRIES; i++)
43 INIT_HLIST_NULLS_HEAD(&sap->sk_laddr_hash[i], i);
41 atomic_set(&sap->refcnt, 1); 44 atomic_set(&sap->refcnt, 1);
42 } 45 }
43 return sap; 46 return sap;
44} 47}
45 48
46/**
47 * llc_add_sap - add sap to station list
48 * @sap: Address of the sap
49 *
50 * Adds a sap to the LLC's station sap list.
51 */
52static void llc_add_sap(struct llc_sap *sap)
53{
54 list_add_tail(&sap->node, &llc_sap_list);
55}
56
57/**
58 * llc_del_sap - del sap from station list
59 * @sap: Address of the sap
60 *
61 * Removes a sap to the LLC's station sap list.
62 */
63static void llc_del_sap(struct llc_sap *sap)
64{
65 write_lock_bh(&llc_sap_list_lock);
66 list_del(&sap->node);
67 write_unlock_bh(&llc_sap_list_lock);
68}
69
70static struct llc_sap *__llc_sap_find(unsigned char sap_value) 49static struct llc_sap *__llc_sap_find(unsigned char sap_value)
71{ 50{
72 struct llc_sap* sap; 51 struct llc_sap* sap;
@@ -90,13 +69,13 @@ out:
90 */ 69 */
91struct llc_sap *llc_sap_find(unsigned char sap_value) 70struct llc_sap *llc_sap_find(unsigned char sap_value)
92{ 71{
93 struct llc_sap* sap; 72 struct llc_sap *sap;
94 73
95 read_lock_bh(&llc_sap_list_lock); 74 rcu_read_lock_bh();
96 sap = __llc_sap_find(sap_value); 75 sap = __llc_sap_find(sap_value);
97 if (sap) 76 if (sap)
98 llc_sap_hold(sap); 77 llc_sap_hold(sap);
99 read_unlock_bh(&llc_sap_list_lock); 78 rcu_read_unlock_bh();
100 return sap; 79 return sap;
101} 80}
102 81
@@ -117,7 +96,7 @@ struct llc_sap *llc_sap_open(unsigned char lsap,
117{ 96{
118 struct llc_sap *sap = NULL; 97 struct llc_sap *sap = NULL;
119 98
120 write_lock_bh(&llc_sap_list_lock); 99 spin_lock_bh(&llc_sap_list_lock);
121 if (__llc_sap_find(lsap)) /* SAP already exists */ 100 if (__llc_sap_find(lsap)) /* SAP already exists */
122 goto out; 101 goto out;
123 sap = llc_sap_alloc(); 102 sap = llc_sap_alloc();
@@ -125,9 +104,9 @@ struct llc_sap *llc_sap_open(unsigned char lsap,
125 goto out; 104 goto out;
126 sap->laddr.lsap = lsap; 105 sap->laddr.lsap = lsap;
127 sap->rcv_func = func; 106 sap->rcv_func = func;
128 llc_add_sap(sap); 107 list_add_tail_rcu(&sap->node, &llc_sap_list);
129out: 108out:
130 write_unlock_bh(&llc_sap_list_lock); 109 spin_unlock_bh(&llc_sap_list_lock);
131 return sap; 110 return sap;
132} 111}
133 112
@@ -142,8 +121,14 @@ out:
142 */ 121 */
143void llc_sap_close(struct llc_sap *sap) 122void llc_sap_close(struct llc_sap *sap)
144{ 123{
145 WARN_ON(!hlist_empty(&sap->sk_list.list)); 124 WARN_ON(sap->sk_count);
146 llc_del_sap(sap); 125
126 spin_lock_bh(&llc_sap_list_lock);
127 list_del_rcu(&sap->node);
128 spin_unlock_bh(&llc_sap_list_lock);
129
130 synchronize_rcu();
131
147 kfree(sap); 132 kfree(sap);
148} 133}
149 134
diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
index 754f4fedc852..b38a1079a98e 100644
--- a/net/llc/llc_output.c
+++ b/net/llc/llc_output.c
@@ -33,48 +33,19 @@
33int llc_mac_hdr_init(struct sk_buff *skb, 33int llc_mac_hdr_init(struct sk_buff *skb,
34 const unsigned char *sa, const unsigned char *da) 34 const unsigned char *sa, const unsigned char *da)
35{ 35{
36 int rc = 0; 36 int rc = -EINVAL;
37 37
38 switch (skb->dev->type) { 38 switch (skb->dev->type) {
39#ifdef CONFIG_TR 39 case ARPHRD_IEEE802_TR:
40 case ARPHRD_IEEE802_TR: {
41 struct net_device *dev = skb->dev;
42 struct trh_hdr *trh;
43
44 skb_push(skb, sizeof(*trh));
45 skb_reset_mac_header(skb);
46 trh = tr_hdr(skb);
47 trh->ac = AC;
48 trh->fc = LLC_FRAME;
49 if (sa)
50 memcpy(trh->saddr, sa, dev->addr_len);
51 else
52 memset(trh->saddr, 0, dev->addr_len);
53 if (da) {
54 memcpy(trh->daddr, da, dev->addr_len);
55 tr_source_route(skb, trh, dev);
56 skb_reset_mac_header(skb);
57 }
58 break;
59 }
60#endif
61 case ARPHRD_ETHER: 40 case ARPHRD_ETHER:
62 case ARPHRD_LOOPBACK: { 41 case ARPHRD_LOOPBACK:
63 unsigned short len = skb->len; 42 rc = dev_hard_header(skb, skb->dev, ETH_P_802_2, da, sa,
64 struct ethhdr *eth; 43 skb->len);
65 44 if (rc > 0)
66 skb_push(skb, sizeof(*eth)); 45 rc = 0;
67 skb_reset_mac_header(skb);
68 eth = eth_hdr(skb);
69 eth->h_proto = htons(len);
70 memcpy(eth->h_dest, da, ETH_ALEN);
71 memcpy(eth->h_source, sa, ETH_ALEN);
72 break; 46 break;
73 }
74 default: 47 default:
75 printk(KERN_WARNING "device type not supported: %d\n", 48 WARN(1, "device type not supported: %d\n", skb->dev->type);
76 skb->dev->type);
77 rc = -EINVAL;
78 } 49 }
79 return rc; 50 return rc;
80} 51}
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index be47ac427f6b..7af1ff2d1f19 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -32,21 +32,23 @@ static void llc_ui_format_mac(struct seq_file *seq, u8 *addr)
32 32
33static struct sock *llc_get_sk_idx(loff_t pos) 33static struct sock *llc_get_sk_idx(loff_t pos)
34{ 34{
35 struct list_head *sap_entry;
36 struct llc_sap *sap; 35 struct llc_sap *sap;
37 struct hlist_node *node;
38 struct sock *sk = NULL; 36 struct sock *sk = NULL;
39 37 int i;
40 list_for_each(sap_entry, &llc_sap_list) { 38
41 sap = list_entry(sap_entry, struct llc_sap, node); 39 list_for_each_entry_rcu(sap, &llc_sap_list, node) {
42 40 spin_lock_bh(&sap->sk_lock);
43 read_lock_bh(&sap->sk_list.lock); 41 for (i = 0; i < LLC_SK_LADDR_HASH_ENTRIES; i++) {
44 sk_for_each(sk, node, &sap->sk_list.list) { 42 struct hlist_nulls_head *head = &sap->sk_laddr_hash[i];
45 if (!pos) 43 struct hlist_nulls_node *node;
46 goto found; 44
47 --pos; 45 sk_nulls_for_each(sk, node, head) {
46 if (!pos)
47 goto found; /* keep the lock */
48 --pos;
49 }
48 } 50 }
49 read_unlock_bh(&sap->sk_list.lock); 51 spin_unlock_bh(&sap->sk_lock);
50 } 52 }
51 sk = NULL; 53 sk = NULL;
52found: 54found:
@@ -57,10 +59,23 @@ static void *llc_seq_start(struct seq_file *seq, loff_t *pos)
57{ 59{
58 loff_t l = *pos; 60 loff_t l = *pos;
59 61
60 read_lock_bh(&llc_sap_list_lock); 62 rcu_read_lock_bh();
61 return l ? llc_get_sk_idx(--l) : SEQ_START_TOKEN; 63 return l ? llc_get_sk_idx(--l) : SEQ_START_TOKEN;
62} 64}
63 65
66static struct sock *laddr_hash_next(struct llc_sap *sap, int bucket)
67{
68 struct hlist_nulls_node *node;
69 struct sock *sk = NULL;
70
71 while (++bucket < LLC_SK_LADDR_HASH_ENTRIES)
72 sk_nulls_for_each(sk, node, &sap->sk_laddr_hash[bucket])
73 goto out;
74
75out:
76 return sk;
77}
78
64static void *llc_seq_next(struct seq_file *seq, void *v, loff_t *pos) 79static void *llc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
65{ 80{
66 struct sock* sk, *next; 81 struct sock* sk, *next;
@@ -73,25 +88,23 @@ static void *llc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
73 goto out; 88 goto out;
74 } 89 }
75 sk = v; 90 sk = v;
76 next = sk_next(sk); 91 next = sk_nulls_next(sk);
77 if (next) { 92 if (next) {
78 sk = next; 93 sk = next;
79 goto out; 94 goto out;
80 } 95 }
81 llc = llc_sk(sk); 96 llc = llc_sk(sk);
82 sap = llc->sap; 97 sap = llc->sap;
83 read_unlock_bh(&sap->sk_list.lock); 98 sk = laddr_hash_next(sap, llc_sk_laddr_hashfn(sap, &llc->laddr));
84 sk = NULL; 99 if (sk)
85 for (;;) { 100 goto out;
86 if (sap->node.next == &llc_sap_list) 101 spin_unlock_bh(&sap->sk_lock);
87 break; 102 list_for_each_entry_continue_rcu(sap, &llc_sap_list, node) {
88 sap = list_entry(sap->node.next, struct llc_sap, node); 103 spin_lock_bh(&sap->sk_lock);
89 read_lock_bh(&sap->sk_list.lock); 104 sk = laddr_hash_next(sap, -1);
90 if (!hlist_empty(&sap->sk_list.list)) { 105 if (sk)
91 sk = sk_head(&sap->sk_list.list); 106 break; /* keep the lock */
92 break; 107 spin_unlock_bh(&sap->sk_lock);
93 }
94 read_unlock_bh(&sap->sk_list.lock);
95 } 108 }
96out: 109out:
97 return sk; 110 return sk;
@@ -104,9 +117,9 @@ static void llc_seq_stop(struct seq_file *seq, void *v)
104 struct llc_sock *llc = llc_sk(sk); 117 struct llc_sock *llc = llc_sk(sk);
105 struct llc_sap *sap = llc->sap; 118 struct llc_sap *sap = llc->sap;
106 119
107 read_unlock_bh(&sap->sk_list.lock); 120 spin_unlock_bh(&sap->sk_lock);
108 } 121 }
109 read_unlock_bh(&llc_sap_list_lock); 122 rcu_read_unlock_bh();
110} 123}
111 124
112static int llc_seq_socket_show(struct seq_file *seq, void *v) 125static int llc_seq_socket_show(struct seq_file *seq, void *v)
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 008de1fc42ca..ad6e6e1cf22f 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -297,6 +297,17 @@ static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb,
297 llc_sap_state_process(sap, skb); 297 llc_sap_state_process(sap, skb);
298} 298}
299 299
300static inline bool llc_dgram_match(const struct llc_sap *sap,
301 const struct llc_addr *laddr,
302 const struct sock *sk)
303{
304 struct llc_sock *llc = llc_sk(sk);
305
306 return sk->sk_type == SOCK_DGRAM &&
307 llc->laddr.lsap == laddr->lsap &&
308 llc_mac_match(llc->laddr.mac, laddr->mac);
309}
310
300/** 311/**
301 * llc_lookup_dgram - Finds dgram socket for the local sap/mac 312 * llc_lookup_dgram - Finds dgram socket for the local sap/mac
302 * @sap: SAP 313 * @sap: SAP
@@ -309,25 +320,68 @@ static struct sock *llc_lookup_dgram(struct llc_sap *sap,
309 const struct llc_addr *laddr) 320 const struct llc_addr *laddr)
310{ 321{
311 struct sock *rc; 322 struct sock *rc;
312 struct hlist_node *node; 323 struct hlist_nulls_node *node;
313 324 int slot = llc_sk_laddr_hashfn(sap, laddr);
314 read_lock_bh(&sap->sk_list.lock); 325 struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
315 sk_for_each(rc, node, &sap->sk_list.list) { 326
316 struct llc_sock *llc = llc_sk(rc); 327 rcu_read_lock_bh();
317 328again:
318 if (rc->sk_type == SOCK_DGRAM && 329 sk_nulls_for_each_rcu(rc, node, laddr_hb) {
319 llc->laddr.lsap == laddr->lsap && 330 if (llc_dgram_match(sap, laddr, rc)) {
320 llc_mac_match(llc->laddr.mac, laddr->mac)) { 331 /* Extra checks required by SLAB_DESTROY_BY_RCU */
321 sock_hold(rc); 332 if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
333 goto again;
334 if (unlikely(llc_sk(rc)->sap != sap ||
335 !llc_dgram_match(sap, laddr, rc))) {
336 sock_put(rc);
337 continue;
338 }
322 goto found; 339 goto found;
323 } 340 }
324 } 341 }
325 rc = NULL; 342 rc = NULL;
343 /*
344 * if the nulls value we got at the end of this lookup is
345 * not the expected one, we must restart lookup.
346 * We probably met an item that was moved to another chain.
347 */
348 if (unlikely(get_nulls_value(node) != slot))
349 goto again;
326found: 350found:
327 read_unlock_bh(&sap->sk_list.lock); 351 rcu_read_unlock_bh();
328 return rc; 352 return rc;
329} 353}
330 354
355static inline bool llc_mcast_match(const struct llc_sap *sap,
356 const struct llc_addr *laddr,
357 const struct sk_buff *skb,
358 const struct sock *sk)
359{
360 struct llc_sock *llc = llc_sk(sk);
361
362 return sk->sk_type == SOCK_DGRAM &&
363 llc->laddr.lsap == laddr->lsap &&
364 llc->dev == skb->dev;
365}
366
367static void llc_do_mcast(struct llc_sap *sap, struct sk_buff *skb,
368 struct sock **stack, int count)
369{
370 struct sk_buff *skb1;
371 int i;
372
373 for (i = 0; i < count; i++) {
374 skb1 = skb_clone(skb, GFP_ATOMIC);
375 if (!skb1) {
376 sock_put(stack[i]);
377 continue;
378 }
379
380 llc_sap_rcv(sap, skb1, stack[i]);
381 sock_put(stack[i]);
382 }
383}
384
331/** 385/**
332 * llc_sap_mcast - Deliver multicast PDU's to all matching datagram sockets. 386 * llc_sap_mcast - Deliver multicast PDU's to all matching datagram sockets.
333 * @sap: SAP 387 * @sap: SAP
@@ -340,32 +394,31 @@ static void llc_sap_mcast(struct llc_sap *sap,
340 const struct llc_addr *laddr, 394 const struct llc_addr *laddr,
341 struct sk_buff *skb) 395 struct sk_buff *skb)
342{ 396{
343 struct sock *sk; 397 int i = 0, count = 256 / sizeof(struct sock *);
398 struct sock *sk, *stack[count];
344 struct hlist_node *node; 399 struct hlist_node *node;
400 struct llc_sock *llc;
401 struct hlist_head *dev_hb = llc_sk_dev_hash(sap, skb->dev->ifindex);
345 402
346 read_lock_bh(&sap->sk_list.lock); 403 spin_lock_bh(&sap->sk_lock);
347 sk_for_each(sk, node, &sap->sk_list.list) { 404 hlist_for_each_entry(llc, node, dev_hb, dev_hash_node) {
348 struct llc_sock *llc = llc_sk(sk);
349 struct sk_buff *skb1;
350 405
351 if (sk->sk_type != SOCK_DGRAM) 406 sk = &llc->sk;
352 continue;
353 407
354 if (llc->laddr.lsap != laddr->lsap) 408 if (!llc_mcast_match(sap, laddr, skb, sk))
355 continue; 409 continue;
356 410
357 if (llc->dev != skb->dev)
358 continue;
359
360 skb1 = skb_clone(skb, GFP_ATOMIC);
361 if (!skb1)
362 break;
363
364 sock_hold(sk); 411 sock_hold(sk);
365 llc_sap_rcv(sap, skb1, sk); 412 if (i < count)
366 sock_put(sk); 413 stack[i++] = sk;
414 else {
415 llc_do_mcast(sap, skb, stack, i);
416 i = 0;
417 }
367 } 418 }
368 read_unlock_bh(&sap->sk_list.lock); 419 spin_unlock_bh(&sap->sk_lock);
420
421 llc_do_mcast(sap, skb, stack, i);
369} 422}
370 423
371 424
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index a10d508b07e1..a952b7f8c648 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -96,18 +96,6 @@ menuconfig MAC80211_DEBUG_MENU
96 ---help--- 96 ---help---
97 This option collects various mac80211 debug settings. 97 This option collects various mac80211 debug settings.
98 98
99config MAC80211_DEBUG_PACKET_ALIGNMENT
100 bool "Enable packet alignment debugging"
101 depends on MAC80211_DEBUG_MENU
102 ---help---
103 This option is recommended for driver authors and strongly
104 discouraged for everybody else, it will trigger a warning
105 when a driver hands mac80211 a buffer that is aligned in
106 a way that will cause problems with the IP stack on some
107 architectures.
108
109 Say N unless you're writing a mac80211 based driver.
110
111config MAC80211_NOINLINE 99config MAC80211_NOINLINE
112 bool "Do not inline TX/RX handlers" 100 bool "Do not inline TX/RX handlers"
113 depends on MAC80211_DEBUG_MENU 101 depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 298cfcc1bf8d..04420291e7ad 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -6,10 +6,10 @@ mac80211-y := \
6 sta_info.o \ 6 sta_info.o \
7 wep.o \ 7 wep.o \
8 wpa.o \ 8 wpa.o \
9 scan.o \ 9 scan.o offchannel.o \
10 ht.o agg-tx.o agg-rx.o \ 10 ht.o agg-tx.o agg-rx.o \
11 ibss.o \ 11 ibss.o \
12 mlme.o \ 12 mlme.o work.o \
13 iface.o \ 13 iface.o \
14 rate.o \ 14 rate.o \
15 michael.o \ 15 michael.o \
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 51c7dc3c4c3b..a978e666ed6f 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -41,8 +41,7 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
41 sta->sta.addr, tid); 41 sta->sta.addr, tid);
42#endif /* CONFIG_MAC80211_HT_DEBUG */ 42#endif /* CONFIG_MAC80211_HT_DEBUG */
43 43
44 if (drv_ampdu_action(local, &sta->sdata->vif, 44 if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
45 IEEE80211_AMPDU_RX_STOP,
46 &sta->sta, tid, NULL)) 45 &sta->sta, tid, NULL))
47 printk(KERN_DEBUG "HW problem - can not stop rx " 46 printk(KERN_DEBUG "HW problem - can not stop rx "
48 "aggregation for tid %d\n", tid); 47 "aggregation for tid %d\n", tid);
@@ -83,12 +82,11 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
83void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, 82void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid,
84 u16 initiator, u16 reason) 83 u16 initiator, u16 reason)
85{ 84{
86 struct ieee80211_local *local = sdata->local;
87 struct sta_info *sta; 85 struct sta_info *sta;
88 86
89 rcu_read_lock(); 87 rcu_read_lock();
90 88
91 sta = sta_info_get(local, ra); 89 sta = sta_info_get(sdata, ra);
92 if (!sta) { 90 if (!sta) {
93 rcu_read_unlock(); 91 rcu_read_unlock();
94 return; 92 return;
@@ -136,7 +134,7 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
136 134
137 if (!skb) { 135 if (!skb) {
138 printk(KERN_DEBUG "%s: failed to allocate buffer " 136 printk(KERN_DEBUG "%s: failed to allocate buffer "
139 "for addba resp frame\n", sdata->dev->name); 137 "for addba resp frame\n", sdata->name);
140 return; 138 return;
141 } 139 }
142 140
@@ -144,10 +142,10 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
144 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 142 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
145 memset(mgmt, 0, 24); 143 memset(mgmt, 0, 24);
146 memcpy(mgmt->da, da, ETH_ALEN); 144 memcpy(mgmt->da, da, ETH_ALEN);
147 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 145 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
148 if (sdata->vif.type == NL80211_IFTYPE_AP || 146 if (sdata->vif.type == NL80211_IFTYPE_AP ||
149 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 147 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
150 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); 148 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
151 else if (sdata->vif.type == NL80211_IFTYPE_STATION) 149 else if (sdata->vif.type == NL80211_IFTYPE_STATION)
152 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); 150 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
153 151
@@ -281,8 +279,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
281 goto end; 279 goto end;
282 } 280 }
283 281
284 ret = drv_ampdu_action(local, &sta->sdata->vif, 282 ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
285 IEEE80211_AMPDU_RX_START,
286 &sta->sta, tid, &start_seq_num); 283 &sta->sta, tid, &start_seq_num);
287#ifdef CONFIG_MAC80211_HT_DEBUG 284#ifdef CONFIG_MAC80211_HT_DEBUG
288 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret); 285 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 5e3a7eccef5a..5538e1b4a697 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -58,17 +58,17 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
58 58
59 if (!skb) { 59 if (!skb) {
60 printk(KERN_ERR "%s: failed to allocate buffer " 60 printk(KERN_ERR "%s: failed to allocate buffer "
61 "for addba request frame\n", sdata->dev->name); 61 "for addba request frame\n", sdata->name);
62 return; 62 return;
63 } 63 }
64 skb_reserve(skb, local->hw.extra_tx_headroom); 64 skb_reserve(skb, local->hw.extra_tx_headroom);
65 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 65 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
66 memset(mgmt, 0, 24); 66 memset(mgmt, 0, 24);
67 memcpy(mgmt->da, da, ETH_ALEN); 67 memcpy(mgmt->da, da, ETH_ALEN);
68 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 68 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
69 if (sdata->vif.type == NL80211_IFTYPE_AP || 69 if (sdata->vif.type == NL80211_IFTYPE_AP ||
70 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 70 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
71 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); 71 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
72 else if (sdata->vif.type == NL80211_IFTYPE_STATION) 72 else if (sdata->vif.type == NL80211_IFTYPE_STATION)
73 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); 73 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
74 74
@@ -104,7 +104,7 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
104 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom); 104 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
105 if (!skb) { 105 if (!skb) {
106 printk(KERN_ERR "%s: failed to allocate buffer for " 106 printk(KERN_ERR "%s: failed to allocate buffer for "
107 "bar frame\n", sdata->dev->name); 107 "bar frame\n", sdata->name);
108 return; 108 return;
109 } 109 }
110 skb_reserve(skb, local->hw.extra_tx_headroom); 110 skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -113,7 +113,7 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
113 bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | 113 bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
114 IEEE80211_STYPE_BACK_REQ); 114 IEEE80211_STYPE_BACK_REQ);
115 memcpy(bar->ra, ra, ETH_ALEN); 115 memcpy(bar->ra, ra, ETH_ALEN);
116 memcpy(bar->ta, sdata->dev->dev_addr, ETH_ALEN); 116 memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
117 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL; 117 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
118 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA; 118 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
119 bar_control |= (u16)(tid << 12); 119 bar_control |= (u16)(tid << 12);
@@ -144,7 +144,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
144 *state = HT_AGG_STATE_REQ_STOP_BA_MSK | 144 *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
145 (initiator << HT_AGG_STATE_INITIATOR_SHIFT); 145 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
146 146
147 ret = drv_ampdu_action(local, &sta->sdata->vif, 147 ret = drv_ampdu_action(local, sta->sdata,
148 IEEE80211_AMPDU_TX_STOP, 148 IEEE80211_AMPDU_TX_STOP,
149 &sta->sta, tid, NULL); 149 &sta->sta, tid, NULL);
150 150
@@ -179,7 +179,8 @@ static void sta_addba_resp_timer_expired(unsigned long data)
179 179
180 /* check if the TID waits for addBA response */ 180 /* check if the TID waits for addBA response */
181 spin_lock_bh(&sta->lock); 181 spin_lock_bh(&sta->lock);
182 if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK)) != 182 if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK |
183 HT_AGG_STATE_REQ_STOP_BA_MSK)) !=
183 HT_ADDBA_REQUESTED_MSK) { 184 HT_ADDBA_REQUESTED_MSK) {
184 spin_unlock_bh(&sta->lock); 185 spin_unlock_bh(&sta->lock);
185 *state = HT_AGG_STATE_IDLE; 186 *state = HT_AGG_STATE_IDLE;
@@ -236,6 +237,14 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
236 sdata->vif.type != NL80211_IFTYPE_AP) 237 sdata->vif.type != NL80211_IFTYPE_AP)
237 return -EINVAL; 238 return -EINVAL;
238 239
240 if (test_sta_flags(sta, WLAN_STA_DISASSOC)) {
241#ifdef CONFIG_MAC80211_HT_DEBUG
242 printk(KERN_DEBUG "Disassociation is in progress. "
243 "Denying BA session request\n");
244#endif
245 return -EINVAL;
246 }
247
239 if (test_sta_flags(sta, WLAN_STA_SUSPEND)) { 248 if (test_sta_flags(sta, WLAN_STA_SUSPEND)) {
240#ifdef CONFIG_MAC80211_HT_DEBUG 249#ifdef CONFIG_MAC80211_HT_DEBUG
241 printk(KERN_DEBUG "Suspend in progress. " 250 printk(KERN_DEBUG "Suspend in progress. "
@@ -301,10 +310,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
301 * call back right away, it must see that the flow has begun */ 310 * call back right away, it must see that the flow has begun */
302 *state |= HT_ADDBA_REQUESTED_MSK; 311 *state |= HT_ADDBA_REQUESTED_MSK;
303 312
304 start_seq_num = sta->tid_seq[tid]; 313 start_seq_num = sta->tid_seq[tid] >> 4;
305 314
306 ret = drv_ampdu_action(local, &sdata->vif, 315 ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
307 IEEE80211_AMPDU_TX_START,
308 pubsta, tid, &start_seq_num); 316 pubsta, tid, &start_seq_num);
309 317
310 if (ret) { 318 if (ret) {
@@ -420,7 +428,7 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
420 ieee80211_agg_splice_finish(local, sta, tid); 428 ieee80211_agg_splice_finish(local, sta, tid);
421 spin_unlock(&local->ampdu_lock); 429 spin_unlock(&local->ampdu_lock);
422 430
423 drv_ampdu_action(local, &sta->sdata->vif, 431 drv_ampdu_action(local, sta->sdata,
424 IEEE80211_AMPDU_TX_OPERATIONAL, 432 IEEE80211_AMPDU_TX_OPERATIONAL,
425 &sta->sta, tid, NULL); 433 &sta->sta, tid, NULL);
426} 434}
@@ -441,7 +449,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
441 } 449 }
442 450
443 rcu_read_lock(); 451 rcu_read_lock();
444 sta = sta_info_get(local, ra); 452 sta = sta_info_get(sdata, ra);
445 if (!sta) { 453 if (!sta) {
446 rcu_read_unlock(); 454 rcu_read_unlock();
447#ifdef CONFIG_MAC80211_HT_DEBUG 455#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -489,7 +497,7 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
489#ifdef CONFIG_MAC80211_HT_DEBUG 497#ifdef CONFIG_MAC80211_HT_DEBUG
490 if (net_ratelimit()) 498 if (net_ratelimit())
491 printk(KERN_WARNING "%s: Not enough memory, " 499 printk(KERN_WARNING "%s: Not enough memory, "
492 "dropping start BA session", skb->dev->name); 500 "dropping start BA session", sdata->name);
493#endif 501#endif
494 return; 502 return;
495 } 503 }
@@ -564,7 +572,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
564#endif /* CONFIG_MAC80211_HT_DEBUG */ 572#endif /* CONFIG_MAC80211_HT_DEBUG */
565 573
566 rcu_read_lock(); 574 rcu_read_lock();
567 sta = sta_info_get(local, ra); 575 sta = sta_info_get(sdata, ra);
568 if (!sta) { 576 if (!sta) {
569#ifdef CONFIG_MAC80211_HT_DEBUG 577#ifdef CONFIG_MAC80211_HT_DEBUG
570 printk(KERN_DEBUG "Could not find station: %pM\n", ra); 578 printk(KERN_DEBUG "Could not find station: %pM\n", ra);
@@ -621,7 +629,7 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
621#ifdef CONFIG_MAC80211_HT_DEBUG 629#ifdef CONFIG_MAC80211_HT_DEBUG
622 if (net_ratelimit()) 630 if (net_ratelimit())
623 printk(KERN_WARNING "%s: Not enough memory, " 631 printk(KERN_WARNING "%s: Not enough memory, "
624 "dropping stop BA session", skb->dev->name); 632 "dropping stop BA session", sdata->name);
625#endif 633#endif
626 return; 634 return;
627 } 635 }
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 6dc3579c0ac5..b7116ef84a3b 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * mac80211 configuration hooks for cfg80211 2 * mac80211 configuration hooks for cfg80211
3 * 3 *
4 * Copyright 2006, 2007 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
5 * 5 *
6 * This file is GPLv2 as found in COPYING. 6 * This file is GPLv2 as found in COPYING.
7 */ 7 */
@@ -78,17 +78,15 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
78 enum nl80211_iftype type, u32 *flags, 78 enum nl80211_iftype type, u32 *flags,
79 struct vif_params *params) 79 struct vif_params *params)
80{ 80{
81 struct ieee80211_sub_if_data *sdata; 81 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
82 int ret; 82 int ret;
83 83
84 if (netif_running(dev)) 84 if (ieee80211_sdata_running(sdata))
85 return -EBUSY; 85 return -EBUSY;
86 86
87 if (!nl80211_params_check(type, params)) 87 if (!nl80211_params_check(type, params))
88 return -EINVAL; 88 return -EINVAL;
89 89
90 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
91
92 ret = ieee80211_if_change_type(sdata, type); 90 ret = ieee80211_if_change_type(sdata, type);
93 if (ret) 91 if (ret)
94 return ret; 92 return ret;
@@ -150,7 +148,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
150 rcu_read_lock(); 148 rcu_read_lock();
151 149
152 if (mac_addr) { 150 if (mac_addr) {
153 sta = sta_info_get(sdata->local, mac_addr); 151 sta = sta_info_get_bss(sdata, mac_addr);
154 if (!sta) { 152 if (!sta) {
155 ieee80211_key_free(key); 153 ieee80211_key_free(key);
156 err = -ENOENT; 154 err = -ENOENT;
@@ -181,7 +179,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
181 if (mac_addr) { 179 if (mac_addr) {
182 ret = -ENOENT; 180 ret = -ENOENT;
183 181
184 sta = sta_info_get(sdata->local, mac_addr); 182 sta = sta_info_get_bss(sdata, mac_addr);
185 if (!sta) 183 if (!sta)
186 goto out_unlock; 184 goto out_unlock;
187 185
@@ -228,7 +226,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
228 rcu_read_lock(); 226 rcu_read_lock();
229 227
230 if (mac_addr) { 228 if (mac_addr) {
231 sta = sta_info_get(sdata->local, mac_addr); 229 sta = sta_info_get_bss(sdata, mac_addr);
232 if (!sta) 230 if (!sta)
233 goto out; 231 goto out;
234 232
@@ -415,15 +413,13 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
415static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev, 413static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
416 u8 *mac, struct station_info *sinfo) 414 u8 *mac, struct station_info *sinfo)
417{ 415{
418 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 416 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
419 struct sta_info *sta; 417 struct sta_info *sta;
420 int ret = -ENOENT; 418 int ret = -ENOENT;
421 419
422 rcu_read_lock(); 420 rcu_read_lock();
423 421
424 /* XXX: verify sta->dev == dev */ 422 sta = sta_info_get_bss(sdata, mac);
425
426 sta = sta_info_get(local, mac);
427 if (sta) { 423 if (sta) {
428 ret = 0; 424 ret = 0;
429 sta_set_sinfo(sta, sinfo); 425 sta_set_sinfo(sta, sinfo);
@@ -519,6 +515,8 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
519 if (old) 515 if (old)
520 memcpy(new->tail, old->tail, new_tail_len); 516 memcpy(new->tail, old->tail, new_tail_len);
521 517
518 sdata->vif.bss_conf.dtim_period = new->dtim_period;
519
522 rcu_assign_pointer(sdata->u.ap.beacon, new); 520 rcu_assign_pointer(sdata->u.ap.beacon, new);
523 521
524 synchronize_rcu(); 522 synchronize_rcu();
@@ -732,7 +730,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
732 } else 730 } else
733 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 731 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
734 732
735 if (compare_ether_addr(mac, dev->dev_addr) == 0) 733 if (compare_ether_addr(mac, sdata->vif.addr) == 0)
736 return -EINVAL; 734 return -EINVAL;
737 735
738 if (is_multicast_ether_addr(mac)) 736 if (is_multicast_ether_addr(mac))
@@ -751,9 +749,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
751 layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN || 749 layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
752 sdata->vif.type == NL80211_IFTYPE_AP; 750 sdata->vif.type == NL80211_IFTYPE_AP;
753 751
754 rcu_read_lock(); 752 err = sta_info_insert_rcu(sta);
755
756 err = sta_info_insert(sta);
757 if (err) { 753 if (err) {
758 rcu_read_unlock(); 754 rcu_read_unlock();
759 return err; 755 return err;
@@ -772,27 +768,13 @@ static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
772{ 768{
773 struct ieee80211_local *local = wiphy_priv(wiphy); 769 struct ieee80211_local *local = wiphy_priv(wiphy);
774 struct ieee80211_sub_if_data *sdata; 770 struct ieee80211_sub_if_data *sdata;
775 struct sta_info *sta;
776 771
777 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 772 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
778 773
779 if (mac) { 774 if (mac)
780 rcu_read_lock(); 775 return sta_info_destroy_addr_bss(sdata, mac);
781
782 /* XXX: get sta belonging to dev */
783 sta = sta_info_get(local, mac);
784 if (!sta) {
785 rcu_read_unlock();
786 return -ENOENT;
787 }
788
789 sta_info_unlink(&sta);
790 rcu_read_unlock();
791
792 sta_info_destroy(sta);
793 } else
794 sta_info_flush(local, sdata);
795 776
777 sta_info_flush(local, sdata);
796 return 0; 778 return 0;
797} 779}
798 780
@@ -801,14 +783,14 @@ static int ieee80211_change_station(struct wiphy *wiphy,
801 u8 *mac, 783 u8 *mac,
802 struct station_parameters *params) 784 struct station_parameters *params)
803{ 785{
786 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
804 struct ieee80211_local *local = wiphy_priv(wiphy); 787 struct ieee80211_local *local = wiphy_priv(wiphy);
805 struct sta_info *sta; 788 struct sta_info *sta;
806 struct ieee80211_sub_if_data *vlansdata; 789 struct ieee80211_sub_if_data *vlansdata;
807 790
808 rcu_read_lock(); 791 rcu_read_lock();
809 792
810 /* XXX: get sta belonging to dev */ 793 sta = sta_info_get_bss(sdata, mac);
811 sta = sta_info_get(local, mac);
812 if (!sta) { 794 if (!sta) {
813 rcu_read_unlock(); 795 rcu_read_unlock();
814 return -ENOENT; 796 return -ENOENT;
@@ -847,7 +829,6 @@ static int ieee80211_change_station(struct wiphy *wiphy,
847static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev, 829static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
848 u8 *dst, u8 *next_hop) 830 u8 *dst, u8 *next_hop)
849{ 831{
850 struct ieee80211_local *local = wiphy_priv(wiphy);
851 struct ieee80211_sub_if_data *sdata; 832 struct ieee80211_sub_if_data *sdata;
852 struct mesh_path *mpath; 833 struct mesh_path *mpath;
853 struct sta_info *sta; 834 struct sta_info *sta;
@@ -856,7 +837,7 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
856 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 837 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
857 838
858 rcu_read_lock(); 839 rcu_read_lock();
859 sta = sta_info_get(local, next_hop); 840 sta = sta_info_get(sdata, next_hop);
860 if (!sta) { 841 if (!sta) {
861 rcu_read_unlock(); 842 rcu_read_unlock();
862 return -ENOENT; 843 return -ENOENT;
@@ -895,7 +876,6 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
895 struct net_device *dev, 876 struct net_device *dev,
896 u8 *dst, u8 *next_hop) 877 u8 *dst, u8 *next_hop)
897{ 878{
898 struct ieee80211_local *local = wiphy_priv(wiphy);
899 struct ieee80211_sub_if_data *sdata; 879 struct ieee80211_sub_if_data *sdata;
900 struct mesh_path *mpath; 880 struct mesh_path *mpath;
901 struct sta_info *sta; 881 struct sta_info *sta;
@@ -904,7 +884,7 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
904 884
905 rcu_read_lock(); 885 rcu_read_lock();
906 886
907 sta = sta_info_get(local, next_hop); 887 sta = sta_info_get(sdata, next_hop);
908 if (!sta) { 888 if (!sta) {
909 rcu_read_unlock(); 889 rcu_read_unlock();
910 return -ENOENT; 890 return -ENOENT;
@@ -1092,6 +1072,13 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
1092 params->use_short_preamble; 1072 params->use_short_preamble;
1093 changed |= BSS_CHANGED_ERP_PREAMBLE; 1073 changed |= BSS_CHANGED_ERP_PREAMBLE;
1094 } 1074 }
1075
1076 if (!sdata->vif.bss_conf.use_short_slot &&
1077 sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ) {
1078 sdata->vif.bss_conf.use_short_slot = true;
1079 changed |= BSS_CHANGED_ERP_SLOT;
1080 }
1081
1095 if (params->use_short_slot_time >= 0) { 1082 if (params->use_short_slot_time >= 0) {
1096 sdata->vif.bss_conf.use_short_slot = 1083 sdata->vif.bss_conf.use_short_slot =
1097 params->use_short_slot_time; 1084 params->use_short_slot_time;
@@ -1135,6 +1122,13 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
1135 p.cw_max = params->cwmax; 1122 p.cw_max = params->cwmax;
1136 p.cw_min = params->cwmin; 1123 p.cw_min = params->cwmin;
1137 p.txop = params->txop; 1124 p.txop = params->txop;
1125
1126 /*
1127 * Setting tx queue params disables u-apsd because it's only
1128 * called in master mode.
1129 */
1130 p.uapsd = false;
1131
1138 if (drv_conf_tx(local, params->queue, &p)) { 1132 if (drv_conf_tx(local, params->queue, &p)) {
1139 printk(KERN_DEBUG "%s: failed to set TX queue " 1133 printk(KERN_DEBUG "%s: failed to set TX queue "
1140 "parameters for queue %d\n", 1134 "parameters for queue %d\n",
@@ -1237,6 +1231,13 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
1237 struct ieee80211_local *local = wiphy_priv(wiphy); 1231 struct ieee80211_local *local = wiphy_priv(wiphy);
1238 int err; 1232 int err;
1239 1233
1234 if (changed & WIPHY_PARAM_COVERAGE_CLASS) {
1235 err = drv_set_coverage_class(local, wiphy->coverage_class);
1236
1237 if (err)
1238 return err;
1239 }
1240
1240 if (changed & WIPHY_PARAM_RTS_THRESHOLD) { 1241 if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
1241 err = drv_set_rts_threshold(local, wiphy->rts_threshold); 1242 err = drv_set_rts_threshold(local, wiphy->rts_threshold);
1242 1243
@@ -1324,6 +1325,50 @@ static int ieee80211_testmode_cmd(struct wiphy *wiphy, void *data, int len)
1324} 1325}
1325#endif 1326#endif
1326 1327
1328int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
1329 enum ieee80211_smps_mode smps_mode)
1330{
1331 const u8 *ap;
1332 enum ieee80211_smps_mode old_req;
1333 int err;
1334
1335 old_req = sdata->u.mgd.req_smps;
1336 sdata->u.mgd.req_smps = smps_mode;
1337
1338 if (old_req == smps_mode &&
1339 smps_mode != IEEE80211_SMPS_AUTOMATIC)
1340 return 0;
1341
1342 /*
1343 * If not associated, or current association is not an HT
1344 * association, there's no need to send an action frame.
1345 */
1346 if (!sdata->u.mgd.associated ||
1347 sdata->local->oper_channel_type == NL80211_CHAN_NO_HT) {
1348 mutex_lock(&sdata->local->iflist_mtx);
1349 ieee80211_recalc_smps(sdata->local, sdata);
1350 mutex_unlock(&sdata->local->iflist_mtx);
1351 return 0;
1352 }
1353
1354 ap = sdata->u.mgd.associated->bssid;
1355
1356 if (smps_mode == IEEE80211_SMPS_AUTOMATIC) {
1357 if (sdata->u.mgd.powersave)
1358 smps_mode = IEEE80211_SMPS_DYNAMIC;
1359 else
1360 smps_mode = IEEE80211_SMPS_OFF;
1361 }
1362
1363 /* send SM PS frame to AP */
1364 err = ieee80211_send_smps_action(sdata, smps_mode,
1365 ap, ap);
1366 if (err)
1367 sdata->u.mgd.req_smps = old_req;
1368
1369 return err;
1370}
1371
1327static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, 1372static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
1328 bool enabled, int timeout) 1373 bool enabled, int timeout)
1329{ 1374{
@@ -1331,6 +1376,9 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
1331 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1376 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1332 struct ieee80211_conf *conf = &local->hw.conf; 1377 struct ieee80211_conf *conf = &local->hw.conf;
1333 1378
1379 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1380 return -EOPNOTSUPP;
1381
1334 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) 1382 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
1335 return -EOPNOTSUPP; 1383 return -EOPNOTSUPP;
1336 1384
@@ -1341,6 +1389,11 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
1341 sdata->u.mgd.powersave = enabled; 1389 sdata->u.mgd.powersave = enabled;
1342 conf->dynamic_ps_timeout = timeout; 1390 conf->dynamic_ps_timeout = timeout;
1343 1391
1392 /* no change, but if automatic follow powersave */
1393 mutex_lock(&sdata->u.mgd.mtx);
1394 __ieee80211_request_smps(sdata, sdata->u.mgd.req_smps);
1395 mutex_unlock(&sdata->u.mgd.mtx);
1396
1344 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS) 1397 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
1345 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); 1398 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
1346 1399
@@ -1356,39 +1409,52 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
1356{ 1409{
1357 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1410 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1358 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1411 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1359 int i, err = -EINVAL; 1412 int i;
1360 u32 target_rate;
1361 struct ieee80211_supported_band *sband;
1362 1413
1363 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1414 /*
1415 * This _could_ be supported by providing a hook for
1416 * drivers for this function, but at this point it
1417 * doesn't seem worth bothering.
1418 */
1419 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
1420 return -EOPNOTSUPP;
1364 1421
1365 /* target_rate = -1, rate->fixed = 0 means auto only, so use all rates
1366 * target_rate = X, rate->fixed = 1 means only rate X
1367 * target_rate = X, rate->fixed = 0 means all rates <= X */
1368 sdata->max_ratectrl_rateidx = -1;
1369 sdata->force_unicast_rateidx = -1;
1370 1422
1371 if (mask->fixed) 1423 for (i = 0; i < IEEE80211_NUM_BANDS; i++)
1372 target_rate = mask->fixed / 100; 1424 sdata->rc_rateidx_mask[i] = mask->control[i].legacy;
1373 else if (mask->maxrate)
1374 target_rate = mask->maxrate / 100;
1375 else
1376 return 0;
1377 1425
1378 for (i=0; i< sband->n_bitrates; i++) { 1426 return 0;
1379 struct ieee80211_rate *brate = &sband->bitrates[i]; 1427}
1380 int this_rate = brate->bitrate;
1381 1428
1382 if (target_rate == this_rate) { 1429static int ieee80211_remain_on_channel(struct wiphy *wiphy,
1383 sdata->max_ratectrl_rateidx = i; 1430 struct net_device *dev,
1384 if (mask->fixed) 1431 struct ieee80211_channel *chan,
1385 sdata->force_unicast_rateidx = i; 1432 enum nl80211_channel_type channel_type,
1386 err = 0; 1433 unsigned int duration,
1387 break; 1434 u64 *cookie)
1388 } 1435{
1389 } 1436 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1390 1437
1391 return err; 1438 return ieee80211_wk_remain_on_channel(sdata, chan, channel_type,
1439 duration, cookie);
1440}
1441
1442static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
1443 struct net_device *dev,
1444 u64 cookie)
1445{
1446 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1447
1448 return ieee80211_wk_cancel_remain_on_channel(sdata, cookie);
1449}
1450
1451static int ieee80211_action(struct wiphy *wiphy, struct net_device *dev,
1452 struct ieee80211_channel *chan,
1453 enum nl80211_channel_type channel_type,
1454 const u8 *buf, size_t len, u64 *cookie)
1455{
1456 return ieee80211_mgd_action(IEEE80211_DEV_TO_SUB_IF(dev), chan,
1457 channel_type, buf, len, cookie);
1392} 1458}
1393 1459
1394struct cfg80211_ops mac80211_config_ops = { 1460struct cfg80211_ops mac80211_config_ops = {
@@ -1437,4 +1503,7 @@ struct cfg80211_ops mac80211_config_ops = {
1437 CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd) 1503 CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd)
1438 .set_power_mgmt = ieee80211_set_power_mgmt, 1504 .set_power_mgmt = ieee80211_set_power_mgmt,
1439 .set_bitrate_mask = ieee80211_set_bitrate_mask, 1505 .set_bitrate_mask = ieee80211_set_bitrate_mask,
1506 .remain_on_channel = ieee80211_remain_on_channel,
1507 .cancel_remain_on_channel = ieee80211_cancel_remain_on_channel,
1508 .action = ieee80211_action,
1440}; 1509};
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index e4b54093d41b..637929b65ccc 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -158,6 +158,130 @@ static const struct file_operations noack_ops = {
158 .open = mac80211_open_file_generic 158 .open = mac80211_open_file_generic
159}; 159};
160 160
161static ssize_t uapsd_queues_read(struct file *file, char __user *user_buf,
162 size_t count, loff_t *ppos)
163{
164 struct ieee80211_local *local = file->private_data;
165 int res;
166 char buf[10];
167
168 res = scnprintf(buf, sizeof(buf), "0x%x\n", local->uapsd_queues);
169
170 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
171}
172
173static ssize_t uapsd_queues_write(struct file *file,
174 const char __user *user_buf,
175 size_t count, loff_t *ppos)
176{
177 struct ieee80211_local *local = file->private_data;
178 unsigned long val;
179 char buf[10];
180 size_t len;
181 int ret;
182
183 len = min(count, sizeof(buf) - 1);
184 if (copy_from_user(buf, user_buf, len))
185 return -EFAULT;
186 buf[len] = '\0';
187
188 ret = strict_strtoul(buf, 0, &val);
189
190 if (ret)
191 return -EINVAL;
192
193 if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
194 return -ERANGE;
195
196 local->uapsd_queues = val;
197
198 return count;
199}
200
201static const struct file_operations uapsd_queues_ops = {
202 .read = uapsd_queues_read,
203 .write = uapsd_queues_write,
204 .open = mac80211_open_file_generic
205};
206
207static ssize_t uapsd_max_sp_len_read(struct file *file, char __user *user_buf,
208 size_t count, loff_t *ppos)
209{
210 struct ieee80211_local *local = file->private_data;
211 int res;
212 char buf[10];
213
214 res = scnprintf(buf, sizeof(buf), "0x%x\n", local->uapsd_max_sp_len);
215
216 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
217}
218
219static ssize_t uapsd_max_sp_len_write(struct file *file,
220 const char __user *user_buf,
221 size_t count, loff_t *ppos)
222{
223 struct ieee80211_local *local = file->private_data;
224 unsigned long val;
225 char buf[10];
226 size_t len;
227 int ret;
228
229 len = min(count, sizeof(buf) - 1);
230 if (copy_from_user(buf, user_buf, len))
231 return -EFAULT;
232 buf[len] = '\0';
233
234 ret = strict_strtoul(buf, 0, &val);
235
236 if (ret)
237 return -EINVAL;
238
239 if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
240 return -ERANGE;
241
242 local->uapsd_max_sp_len = val;
243
244 return count;
245}
246
247static const struct file_operations uapsd_max_sp_len_ops = {
248 .read = uapsd_max_sp_len_read,
249 .write = uapsd_max_sp_len_write,
250 .open = mac80211_open_file_generic
251};
252
253static ssize_t channel_type_read(struct file *file, char __user *user_buf,
254 size_t count, loff_t *ppos)
255{
256 struct ieee80211_local *local = file->private_data;
257 const char *buf;
258
259 switch (local->hw.conf.channel_type) {
260 case NL80211_CHAN_NO_HT:
261 buf = "no ht\n";
262 break;
263 case NL80211_CHAN_HT20:
264 buf = "ht20\n";
265 break;
266 case NL80211_CHAN_HT40MINUS:
267 buf = "ht40-\n";
268 break;
269 case NL80211_CHAN_HT40PLUS:
270 buf = "ht40+\n";
271 break;
272 default:
273 buf = "???";
274 break;
275 }
276
277 return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
278}
279
280static const struct file_operations channel_type_ops = {
281 .read = channel_type_read,
282 .open = mac80211_open_file_generic
283};
284
161static ssize_t queues_read(struct file *file, char __user *user_buf, 285static ssize_t queues_read(struct file *file, char __user *user_buf,
162 size_t count, loff_t *ppos) 286 size_t count, loff_t *ppos)
163{ 287{
@@ -314,6 +438,9 @@ void debugfs_hw_add(struct ieee80211_local *local)
314 DEBUGFS_ADD(queues); 438 DEBUGFS_ADD(queues);
315 DEBUGFS_ADD_MODE(reset, 0200); 439 DEBUGFS_ADD_MODE(reset, 0200);
316 DEBUGFS_ADD(noack); 440 DEBUGFS_ADD(noack);
441 DEBUGFS_ADD(uapsd_queues);
442 DEBUGFS_ADD(uapsd_max_sp_len);
443 DEBUGFS_ADD(channel_type);
317 444
318 statsd = debugfs_create_dir("statistics", phyd); 445 statsd = debugfs_create_dir("statistics", phyd);
319 446
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index e0f5224630da..d12e743cb4e1 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -56,7 +56,7 @@ KEY_CONF_FILE(keyidx, D);
56KEY_CONF_FILE(hw_key_idx, D); 56KEY_CONF_FILE(hw_key_idx, D);
57KEY_FILE(flags, X); 57KEY_FILE(flags, X);
58KEY_FILE(tx_rx_count, D); 58KEY_FILE(tx_rx_count, D);
59KEY_READ(ifindex, sdata->dev->ifindex, 20, "%d\n"); 59KEY_READ(ifindex, sdata->name, IFNAMSIZ + 2, "%s\n");
60KEY_OPS(ifindex); 60KEY_OPS(ifindex);
61 61
62static ssize_t key_algorithm_read(struct file *file, 62static ssize_t key_algorithm_read(struct file *file,
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 472b2039906c..b4ddb2f83914 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -41,6 +41,34 @@ static ssize_t ieee80211_if_read(
41 return ret; 41 return ret;
42} 42}
43 43
44static ssize_t ieee80211_if_write(
45 struct ieee80211_sub_if_data *sdata,
46 const char __user *userbuf,
47 size_t count, loff_t *ppos,
48 ssize_t (*write)(struct ieee80211_sub_if_data *, const char *, int))
49{
50 u8 *buf;
51 ssize_t ret;
52
53 buf = kmalloc(count, GFP_KERNEL);
54 if (!buf)
55 return -ENOMEM;
56
57 ret = -EFAULT;
58 if (copy_from_user(buf, userbuf, count))
59 goto freebuf;
60
61 ret = -ENODEV;
62 rtnl_lock();
63 if (sdata->dev->reg_state == NETREG_REGISTERED)
64 ret = (*write)(sdata, buf, count);
65 rtnl_unlock();
66
67freebuf:
68 kfree(buf);
69 return ret;
70}
71
44#define IEEE80211_IF_FMT(name, field, format_string) \ 72#define IEEE80211_IF_FMT(name, field, format_string) \
45static ssize_t ieee80211_if_fmt_##name( \ 73static ssize_t ieee80211_if_fmt_##name( \
46 const struct ieee80211_sub_if_data *sdata, char *buf, \ 74 const struct ieee80211_sub_if_data *sdata, char *buf, \
@@ -71,7 +99,7 @@ static ssize_t ieee80211_if_fmt_##name( \
71 return scnprintf(buf, buflen, "%pM\n", sdata->field); \ 99 return scnprintf(buf, buflen, "%pM\n", sdata->field); \
72} 100}
73 101
74#define __IEEE80211_IF_FILE(name) \ 102#define __IEEE80211_IF_FILE(name, _write) \
75static ssize_t ieee80211_if_read_##name(struct file *file, \ 103static ssize_t ieee80211_if_read_##name(struct file *file, \
76 char __user *userbuf, \ 104 char __user *userbuf, \
77 size_t count, loff_t *ppos) \ 105 size_t count, loff_t *ppos) \
@@ -82,22 +110,99 @@ static ssize_t ieee80211_if_read_##name(struct file *file, \
82} \ 110} \
83static const struct file_operations name##_ops = { \ 111static const struct file_operations name##_ops = { \
84 .read = ieee80211_if_read_##name, \ 112 .read = ieee80211_if_read_##name, \
113 .write = (_write), \
85 .open = mac80211_open_file_generic, \ 114 .open = mac80211_open_file_generic, \
86} 115}
87 116
117#define __IEEE80211_IF_FILE_W(name) \
118static ssize_t ieee80211_if_write_##name(struct file *file, \
119 const char __user *userbuf, \
120 size_t count, loff_t *ppos) \
121{ \
122 return ieee80211_if_write(file->private_data, userbuf, count, \
123 ppos, ieee80211_if_parse_##name); \
124} \
125__IEEE80211_IF_FILE(name, ieee80211_if_write_##name)
126
127
88#define IEEE80211_IF_FILE(name, field, format) \ 128#define IEEE80211_IF_FILE(name, field, format) \
89 IEEE80211_IF_FMT_##format(name, field) \ 129 IEEE80211_IF_FMT_##format(name, field) \
90 __IEEE80211_IF_FILE(name) 130 __IEEE80211_IF_FILE(name, NULL)
91 131
92/* common attributes */ 132/* common attributes */
93IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC); 133IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC);
94IEEE80211_IF_FILE(force_unicast_rateidx, force_unicast_rateidx, DEC); 134IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ],
95IEEE80211_IF_FILE(max_ratectrl_rateidx, max_ratectrl_rateidx, DEC); 135 HEX);
136IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
137 HEX);
96 138
97/* STA attributes */ 139/* STA attributes */
98IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC); 140IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
99IEEE80211_IF_FILE(aid, u.mgd.aid, DEC); 141IEEE80211_IF_FILE(aid, u.mgd.aid, DEC);
100IEEE80211_IF_FILE(capab, u.mgd.capab, HEX); 142
143static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
144 enum ieee80211_smps_mode smps_mode)
145{
146 struct ieee80211_local *local = sdata->local;
147 int err;
148
149 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS) &&
150 smps_mode == IEEE80211_SMPS_STATIC)
151 return -EINVAL;
152
153 /* auto should be dynamic if in PS mode */
154 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS) &&
155 (smps_mode == IEEE80211_SMPS_DYNAMIC ||
156 smps_mode == IEEE80211_SMPS_AUTOMATIC))
157 return -EINVAL;
158
159 /* supported only on managed interfaces for now */
160 if (sdata->vif.type != NL80211_IFTYPE_STATION)
161 return -EOPNOTSUPP;
162
163 mutex_lock(&local->iflist_mtx);
164 err = __ieee80211_request_smps(sdata, smps_mode);
165 mutex_unlock(&local->iflist_mtx);
166
167 return err;
168}
169
170static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
171 [IEEE80211_SMPS_AUTOMATIC] = "auto",
172 [IEEE80211_SMPS_OFF] = "off",
173 [IEEE80211_SMPS_STATIC] = "static",
174 [IEEE80211_SMPS_DYNAMIC] = "dynamic",
175};
176
177static ssize_t ieee80211_if_fmt_smps(const struct ieee80211_sub_if_data *sdata,
178 char *buf, int buflen)
179{
180 if (sdata->vif.type != NL80211_IFTYPE_STATION)
181 return -EOPNOTSUPP;
182
183 return snprintf(buf, buflen, "request: %s\nused: %s\n",
184 smps_modes[sdata->u.mgd.req_smps],
185 smps_modes[sdata->u.mgd.ap_smps]);
186}
187
188static ssize_t ieee80211_if_parse_smps(struct ieee80211_sub_if_data *sdata,
189 const char *buf, int buflen)
190{
191 enum ieee80211_smps_mode mode;
192
193 for (mode = 0; mode < IEEE80211_SMPS_NUM_MODES; mode++) {
194 if (strncmp(buf, smps_modes[mode], buflen) == 0) {
195 int err = ieee80211_set_smps(sdata, mode);
196 if (!err)
197 return buflen;
198 return err;
199 }
200 }
201
202 return -EINVAL;
203}
204
205__IEEE80211_IF_FILE_W(smps);
101 206
102/* AP attributes */ 207/* AP attributes */
103IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); 208IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
@@ -109,7 +214,7 @@ static ssize_t ieee80211_if_fmt_num_buffered_multicast(
109 return scnprintf(buf, buflen, "%u\n", 214 return scnprintf(buf, buflen, "%u\n",
110 skb_queue_len(&sdata->u.ap.ps_bc_buf)); 215 skb_queue_len(&sdata->u.ap.ps_bc_buf));
111} 216}
112__IEEE80211_IF_FILE(num_buffered_multicast); 217__IEEE80211_IF_FILE(num_buffered_multicast, NULL);
113 218
114/* WDS attributes */ 219/* WDS attributes */
115IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC); 220IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC);
@@ -154,46 +259,50 @@ IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
154#endif 259#endif
155 260
156 261
157#define DEBUGFS_ADD(name, type) \ 262#define DEBUGFS_ADD(name) \
158 debugfs_create_file(#name, 0400, sdata->debugfs.dir, \ 263 debugfs_create_file(#name, 0400, sdata->debugfs.dir, \
159 sdata, &name##_ops); 264 sdata, &name##_ops);
160 265
266#define DEBUGFS_ADD_MODE(name, mode) \
267 debugfs_create_file(#name, mode, sdata->debugfs.dir, \
268 sdata, &name##_ops);
269
161static void add_sta_files(struct ieee80211_sub_if_data *sdata) 270static void add_sta_files(struct ieee80211_sub_if_data *sdata)
162{ 271{
163 DEBUGFS_ADD(drop_unencrypted, sta); 272 DEBUGFS_ADD(drop_unencrypted);
164 DEBUGFS_ADD(force_unicast_rateidx, sta); 273 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
165 DEBUGFS_ADD(max_ratectrl_rateidx, sta); 274 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
166 275
167 DEBUGFS_ADD(bssid, sta); 276 DEBUGFS_ADD(bssid);
168 DEBUGFS_ADD(aid, sta); 277 DEBUGFS_ADD(aid);
169 DEBUGFS_ADD(capab, sta); 278 DEBUGFS_ADD_MODE(smps, 0600);
170} 279}
171 280
172static void add_ap_files(struct ieee80211_sub_if_data *sdata) 281static void add_ap_files(struct ieee80211_sub_if_data *sdata)
173{ 282{
174 DEBUGFS_ADD(drop_unencrypted, ap); 283 DEBUGFS_ADD(drop_unencrypted);
175 DEBUGFS_ADD(force_unicast_rateidx, ap); 284 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
176 DEBUGFS_ADD(max_ratectrl_rateidx, ap); 285 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
177 286
178 DEBUGFS_ADD(num_sta_ps, ap); 287 DEBUGFS_ADD(num_sta_ps);
179 DEBUGFS_ADD(dtim_count, ap); 288 DEBUGFS_ADD(dtim_count);
180 DEBUGFS_ADD(num_buffered_multicast, ap); 289 DEBUGFS_ADD(num_buffered_multicast);
181} 290}
182 291
183static void add_wds_files(struct ieee80211_sub_if_data *sdata) 292static void add_wds_files(struct ieee80211_sub_if_data *sdata)
184{ 293{
185 DEBUGFS_ADD(drop_unencrypted, wds); 294 DEBUGFS_ADD(drop_unencrypted);
186 DEBUGFS_ADD(force_unicast_rateidx, wds); 295 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
187 DEBUGFS_ADD(max_ratectrl_rateidx, wds); 296 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
188 297
189 DEBUGFS_ADD(peer, wds); 298 DEBUGFS_ADD(peer);
190} 299}
191 300
192static void add_vlan_files(struct ieee80211_sub_if_data *sdata) 301static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
193{ 302{
194 DEBUGFS_ADD(drop_unencrypted, vlan); 303 DEBUGFS_ADD(drop_unencrypted);
195 DEBUGFS_ADD(force_unicast_rateidx, vlan); 304 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
196 DEBUGFS_ADD(max_ratectrl_rateidx, vlan); 305 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
197} 306}
198 307
199static void add_monitor_files(struct ieee80211_sub_if_data *sdata) 308static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
@@ -280,16 +389,11 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
280 } 389 }
281} 390}
282 391
283static int notif_registered;
284
285void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata) 392void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata)
286{ 393{
287 char buf[10+IFNAMSIZ]; 394 char buf[10+IFNAMSIZ];
288 395
289 if (!notif_registered) 396 sprintf(buf, "netdev:%s", sdata->name);
290 return;
291
292 sprintf(buf, "netdev:%s", sdata->dev->name);
293 sdata->debugfs.dir = debugfs_create_dir(buf, 397 sdata->debugfs.dir = debugfs_create_dir(buf,
294 sdata->local->hw.wiphy->debugfsdir); 398 sdata->local->hw.wiphy->debugfsdir);
295 add_files(sdata); 399 add_files(sdata);
@@ -304,58 +408,18 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
304 sdata->debugfs.dir = NULL; 408 sdata->debugfs.dir = NULL;
305} 409}
306 410
307static int netdev_notify(struct notifier_block *nb, 411void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
308 unsigned long state,
309 void *ndev)
310{ 412{
311 struct net_device *dev = ndev;
312 struct dentry *dir; 413 struct dentry *dir;
313 struct ieee80211_sub_if_data *sdata; 414 char buf[10 + IFNAMSIZ];
314 char buf[10+IFNAMSIZ];
315
316 if (state != NETDEV_CHANGENAME)
317 return 0;
318
319 if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
320 return 0;
321
322 if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
323 return 0;
324
325 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
326 415
327 dir = sdata->debugfs.dir; 416 dir = sdata->debugfs.dir;
328 417
329 if (!dir) 418 if (!dir)
330 return 0; 419 return;
331 420
332 sprintf(buf, "netdev:%s", dev->name); 421 sprintf(buf, "netdev:%s", sdata->name);
333 if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf)) 422 if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf))
334 printk(KERN_ERR "mac80211: debugfs: failed to rename debugfs " 423 printk(KERN_ERR "mac80211: debugfs: failed to rename debugfs "
335 "dir to %s\n", buf); 424 "dir to %s\n", buf);
336
337 return 0;
338}
339
340static struct notifier_block mac80211_debugfs_netdev_notifier = {
341 .notifier_call = netdev_notify,
342};
343
344void ieee80211_debugfs_netdev_init(void)
345{
346 int err;
347
348 err = register_netdevice_notifier(&mac80211_debugfs_netdev_notifier);
349 if (err) {
350 printk(KERN_ERR
351 "mac80211: failed to install netdev notifier,"
352 " disabling per-netdev debugfs!\n");
353 } else
354 notif_registered = 1;
355}
356
357void ieee80211_debugfs_netdev_exit(void)
358{
359 unregister_netdevice_notifier(&mac80211_debugfs_netdev_notifier);
360 notif_registered = 0;
361} 425}
diff --git a/net/mac80211/debugfs_netdev.h b/net/mac80211/debugfs_netdev.h
index 7af731f0b731..79025e79f4d6 100644
--- a/net/mac80211/debugfs_netdev.h
+++ b/net/mac80211/debugfs_netdev.h
@@ -6,8 +6,7 @@
6#ifdef CONFIG_MAC80211_DEBUGFS 6#ifdef CONFIG_MAC80211_DEBUGFS
7void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata); 7void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata);
8void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata); 8void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata);
9void ieee80211_debugfs_netdev_init(void); 9void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata);
10void ieee80211_debugfs_netdev_exit(void);
11#else 10#else
12static inline void ieee80211_debugfs_add_netdev( 11static inline void ieee80211_debugfs_add_netdev(
13 struct ieee80211_sub_if_data *sdata) 12 struct ieee80211_sub_if_data *sdata)
@@ -15,10 +14,8 @@ static inline void ieee80211_debugfs_add_netdev(
15static inline void ieee80211_debugfs_remove_netdev( 14static inline void ieee80211_debugfs_remove_netdev(
16 struct ieee80211_sub_if_data *sdata) 15 struct ieee80211_sub_if_data *sdata)
17{} 16{}
18static inline void ieee80211_debugfs_netdev_init(void) 17static inline void ieee80211_debugfs_rename_netdev(
19{} 18 struct ieee80211_sub_if_data *sdata)
20
21static inline void ieee80211_debugfs_netdev_exit(void)
22{} 19{}
23#endif 20#endif
24 21
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 3f41608c8081..d92800bb2d2f 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -44,7 +44,7 @@ static const struct file_operations sta_ ##name## _ops = { \
44 STA_OPS(name) 44 STA_OPS(name)
45 45
46STA_FILE(aid, sta.aid, D); 46STA_FILE(aid, sta.aid, D);
47STA_FILE(dev, sdata->dev->name, S); 47STA_FILE(dev, sdata->name, S);
48STA_FILE(rx_packets, rx_packets, LU); 48STA_FILE(rx_packets, rx_packets, LU);
49STA_FILE(tx_packets, tx_packets, LU); 49STA_FILE(tx_packets, tx_packets, LU);
50STA_FILE(rx_bytes, rx_bytes, LU); 50STA_FILE(rx_bytes, rx_bytes, LU);
@@ -120,36 +120,38 @@ STA_OPS(last_seq_ctrl);
120static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, 120static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
121 size_t count, loff_t *ppos) 121 size_t count, loff_t *ppos)
122{ 122{
123 char buf[30 + STA_TID_NUM * 70], *p = buf; 123 char buf[64 + STA_TID_NUM * 40], *p = buf;
124 int i; 124 int i;
125 struct sta_info *sta = file->private_data; 125 struct sta_info *sta = file->private_data;
126 126
127 spin_lock_bh(&sta->lock); 127 spin_lock_bh(&sta->lock);
128 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n", 128 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
129 sta->ampdu_mlme.dialog_token_allocator + 1); 129 sta->ampdu_mlme.dialog_token_allocator + 1);
130 p += scnprintf(p, sizeof(buf) + buf - p,
131 "TID\t\tRX\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n");
130 for (i = 0; i < STA_TID_NUM; i++) { 132 for (i = 0; i < STA_TID_NUM; i++) {
131 p += scnprintf(p, sizeof(buf)+buf-p, "TID %02d:", i); 133 p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i);
132 p += scnprintf(p, sizeof(buf)+buf-p, " RX=%x", 134 p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
133 sta->ampdu_mlme.tid_state_rx[i]); 135 sta->ampdu_mlme.tid_state_rx[i]);
134 p += scnprintf(p, sizeof(buf)+buf-p, "/DTKN=%#.2x", 136 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
135 sta->ampdu_mlme.tid_state_rx[i] ? 137 sta->ampdu_mlme.tid_state_rx[i] ?
136 sta->ampdu_mlme.tid_rx[i]->dialog_token : 0); 138 sta->ampdu_mlme.tid_rx[i]->dialog_token : 0);
137 p += scnprintf(p, sizeof(buf)+buf-p, "/SSN=%#.3x", 139 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
138 sta->ampdu_mlme.tid_state_rx[i] ? 140 sta->ampdu_mlme.tid_state_rx[i] ?
139 sta->ampdu_mlme.tid_rx[i]->ssn : 0); 141 sta->ampdu_mlme.tid_rx[i]->ssn : 0);
140 142
141 p += scnprintf(p, sizeof(buf)+buf-p, " TX=%x", 143 p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
142 sta->ampdu_mlme.tid_state_tx[i]); 144 sta->ampdu_mlme.tid_state_tx[i]);
143 p += scnprintf(p, sizeof(buf)+buf-p, "/DTKN=%#.2x", 145 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
144 sta->ampdu_mlme.tid_state_tx[i] ? 146 sta->ampdu_mlme.tid_state_tx[i] ?
145 sta->ampdu_mlme.tid_tx[i]->dialog_token : 0); 147 sta->ampdu_mlme.tid_tx[i]->dialog_token : 0);
146 p += scnprintf(p, sizeof(buf)+buf-p, "/SSN=%#.3x", 148 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
147 sta->ampdu_mlme.tid_state_tx[i] ? 149 sta->ampdu_mlme.tid_state_tx[i] ?
148 sta->ampdu_mlme.tid_tx[i]->ssn : 0); 150 sta->ampdu_mlme.tid_tx[i]->ssn : 0);
149 p += scnprintf(p, sizeof(buf)+buf-p, "/pending=%03d", 151 p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d",
150 sta->ampdu_mlme.tid_state_tx[i] ? 152 sta->ampdu_mlme.tid_state_tx[i] ?
151 skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0); 153 skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0);
152 p += scnprintf(p, sizeof(buf)+buf-p, "\n"); 154 p += scnprintf(p, sizeof(buf) + buf - p, "\n");
153 } 155 }
154 spin_unlock_bh(&sta->lock); 156 spin_unlock_bh(&sta->lock);
155 157
@@ -160,7 +162,12 @@ STA_OPS(agg_status);
160static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf, 162static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
161 size_t count, loff_t *ppos) 163 size_t count, loff_t *ppos)
162{ 164{
163 char buf[200], *p = buf; 165#define PRINT_HT_CAP(_cond, _str) \
166 do { \
167 if (_cond) \
168 p += scnprintf(p, sizeof(buf)+buf-p, "\t" _str "\n"); \
169 } while (0)
170 char buf[512], *p = buf;
164 int i; 171 int i;
165 struct sta_info *sta = file->private_data; 172 struct sta_info *sta = file->private_data;
166 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap; 173 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
@@ -168,15 +175,64 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
168 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n", 175 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
169 htc->ht_supported ? "" : "not "); 176 htc->ht_supported ? "" : "not ");
170 if (htc->ht_supported) { 177 if (htc->ht_supported) {
171 p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.2x\n", htc->cap); 178 p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap);
179
180 PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDCP");
181 PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40");
182 PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20");
183
184 PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 0, "Static SM Power Save");
185 PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 1, "Dynamic SM Power Save");
186 PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 3, "SM Power Save disabled");
187
188 PRINT_HT_CAP((htc->cap & BIT(4)), "RX Greenfield");
189 PRINT_HT_CAP((htc->cap & BIT(5)), "RX HT20 SGI");
190 PRINT_HT_CAP((htc->cap & BIT(6)), "RX HT40 SGI");
191 PRINT_HT_CAP((htc->cap & BIT(7)), "TX STBC");
192
193 PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 0, "No RX STBC");
194 PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 1, "RX STBC 1-stream");
195 PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 2, "RX STBC 2-streams");
196 PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 3, "RX STBC 3-streams");
197
198 PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack");
199
200 PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
201 "3839 bytes");
202 PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: "
203 "7935 bytes");
204
205 /*
206 * For beacons and probe response this would mean the BSS
207 * does or does not allow the usage of DSSS/CCK HT40.
208 * Otherwise it means the STA does or does not use
209 * DSSS/CCK HT40.
210 */
211 PRINT_HT_CAP((htc->cap & BIT(12)), "DSSS/CCK HT40");
212 PRINT_HT_CAP(!(htc->cap & BIT(12)), "No DSSS/CCK HT40");
213
214 /* BIT(13) is reserved */
215
216 PRINT_HT_CAP((htc->cap & BIT(14)), "40 MHz Intolerant");
217
218 PRINT_HT_CAP((htc->cap & BIT(15)), "L-SIG TXOP protection");
219
172 p += scnprintf(p, sizeof(buf)+buf-p, "ampdu factor/density: %d/%d\n", 220 p += scnprintf(p, sizeof(buf)+buf-p, "ampdu factor/density: %d/%d\n",
173 htc->ampdu_factor, htc->ampdu_density); 221 htc->ampdu_factor, htc->ampdu_density);
174 p += scnprintf(p, sizeof(buf)+buf-p, "MCS mask:"); 222 p += scnprintf(p, sizeof(buf)+buf-p, "MCS mask:");
223
175 for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) 224 for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
176 p += scnprintf(p, sizeof(buf)+buf-p, " %.2x", 225 p += scnprintf(p, sizeof(buf)+buf-p, " %.2x",
177 htc->mcs.rx_mask[i]); 226 htc->mcs.rx_mask[i]);
178 p += scnprintf(p, sizeof(buf)+buf-p, "\nMCS rx highest: %d\n", 227 p += scnprintf(p, sizeof(buf)+buf-p, "\n");
179 le16_to_cpu(htc->mcs.rx_highest)); 228
229 /* If not set this is meaningless */
230 if (le16_to_cpu(htc->mcs.rx_highest)) {
231 p += scnprintf(p, sizeof(buf)+buf-p,
232 "MCS rx highest: %d Mbps\n",
233 le16_to_cpu(htc->mcs.rx_highest));
234 }
235
180 p += scnprintf(p, sizeof(buf)+buf-p, "MCS tx params: %x\n", 236 p += scnprintf(p, sizeof(buf)+buf-p, "MCS tx params: %x\n",
181 htc->mcs.tx_params); 237 htc->mcs.tx_params);
182 } 238 }
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 921dd9c9ff62..c3d844093a2f 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -14,6 +14,8 @@ static inline int drv_start(struct ieee80211_local *local)
14{ 14{
15 int ret; 15 int ret;
16 16
17 might_sleep();
18
17 local->started = true; 19 local->started = true;
18 smp_mb(); 20 smp_mb();
19 ret = local->ops->start(&local->hw); 21 ret = local->ops->start(&local->hw);
@@ -23,6 +25,8 @@ static inline int drv_start(struct ieee80211_local *local)
23 25
24static inline void drv_stop(struct ieee80211_local *local) 26static inline void drv_stop(struct ieee80211_local *local)
25{ 27{
28 might_sleep();
29
26 local->ops->stop(&local->hw); 30 local->ops->stop(&local->hw);
27 trace_drv_stop(local); 31 trace_drv_stop(local);
28 32
@@ -36,35 +40,47 @@ static inline void drv_stop(struct ieee80211_local *local)
36} 40}
37 41
38static inline int drv_add_interface(struct ieee80211_local *local, 42static inline int drv_add_interface(struct ieee80211_local *local,
39 struct ieee80211_if_init_conf *conf) 43 struct ieee80211_vif *vif)
40{ 44{
41 int ret = local->ops->add_interface(&local->hw, conf); 45 int ret;
42 trace_drv_add_interface(local, conf->mac_addr, conf->vif, ret); 46
47 might_sleep();
48
49 ret = local->ops->add_interface(&local->hw, vif);
50 trace_drv_add_interface(local, vif_to_sdata(vif), ret);
43 return ret; 51 return ret;
44} 52}
45 53
46static inline void drv_remove_interface(struct ieee80211_local *local, 54static inline void drv_remove_interface(struct ieee80211_local *local,
47 struct ieee80211_if_init_conf *conf) 55 struct ieee80211_vif *vif)
48{ 56{
49 local->ops->remove_interface(&local->hw, conf); 57 might_sleep();
50 trace_drv_remove_interface(local, conf->mac_addr, conf->vif); 58
59 local->ops->remove_interface(&local->hw, vif);
60 trace_drv_remove_interface(local, vif_to_sdata(vif));
51} 61}
52 62
53static inline int drv_config(struct ieee80211_local *local, u32 changed) 63static inline int drv_config(struct ieee80211_local *local, u32 changed)
54{ 64{
55 int ret = local->ops->config(&local->hw, changed); 65 int ret;
66
67 might_sleep();
68
69 ret = local->ops->config(&local->hw, changed);
56 trace_drv_config(local, changed, ret); 70 trace_drv_config(local, changed, ret);
57 return ret; 71 return ret;
58} 72}
59 73
60static inline void drv_bss_info_changed(struct ieee80211_local *local, 74static inline void drv_bss_info_changed(struct ieee80211_local *local,
61 struct ieee80211_vif *vif, 75 struct ieee80211_sub_if_data *sdata,
62 struct ieee80211_bss_conf *info, 76 struct ieee80211_bss_conf *info,
63 u32 changed) 77 u32 changed)
64{ 78{
79 might_sleep();
80
65 if (local->ops->bss_info_changed) 81 if (local->ops->bss_info_changed)
66 local->ops->bss_info_changed(&local->hw, vif, info, changed); 82 local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed);
67 trace_drv_bss_info_changed(local, vif, info, changed); 83 trace_drv_bss_info_changed(local, sdata, info, changed);
68} 84}
69 85
70static inline u64 drv_prepare_multicast(struct ieee80211_local *local, 86static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
@@ -106,36 +122,53 @@ static inline int drv_set_tim(struct ieee80211_local *local,
106} 122}
107 123
108static inline int drv_set_key(struct ieee80211_local *local, 124static inline int drv_set_key(struct ieee80211_local *local,
109 enum set_key_cmd cmd, struct ieee80211_vif *vif, 125 enum set_key_cmd cmd,
126 struct ieee80211_sub_if_data *sdata,
110 struct ieee80211_sta *sta, 127 struct ieee80211_sta *sta,
111 struct ieee80211_key_conf *key) 128 struct ieee80211_key_conf *key)
112{ 129{
113 int ret = local->ops->set_key(&local->hw, cmd, vif, sta, key); 130 int ret;
114 trace_drv_set_key(local, cmd, vif, sta, key, ret); 131
132 might_sleep();
133
134 ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key);
135 trace_drv_set_key(local, cmd, sdata, sta, key, ret);
115 return ret; 136 return ret;
116} 137}
117 138
118static inline void drv_update_tkip_key(struct ieee80211_local *local, 139static inline void drv_update_tkip_key(struct ieee80211_local *local,
140 struct ieee80211_sub_if_data *sdata,
119 struct ieee80211_key_conf *conf, 141 struct ieee80211_key_conf *conf,
120 const u8 *address, u32 iv32, 142 struct sta_info *sta, u32 iv32,
121 u16 *phase1key) 143 u16 *phase1key)
122{ 144{
145 struct ieee80211_sta *ista = NULL;
146
147 if (sta)
148 ista = &sta->sta;
149
123 if (local->ops->update_tkip_key) 150 if (local->ops->update_tkip_key)
124 local->ops->update_tkip_key(&local->hw, conf, address, 151 local->ops->update_tkip_key(&local->hw, &sdata->vif, conf,
125 iv32, phase1key); 152 ista, iv32, phase1key);
126 trace_drv_update_tkip_key(local, conf, address, iv32); 153 trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
127} 154}
128 155
129static inline int drv_hw_scan(struct ieee80211_local *local, 156static inline int drv_hw_scan(struct ieee80211_local *local,
130 struct cfg80211_scan_request *req) 157 struct cfg80211_scan_request *req)
131{ 158{
132 int ret = local->ops->hw_scan(&local->hw, req); 159 int ret;
160
161 might_sleep();
162
163 ret = local->ops->hw_scan(&local->hw, req);
133 trace_drv_hw_scan(local, req, ret); 164 trace_drv_hw_scan(local, req, ret);
134 return ret; 165 return ret;
135} 166}
136 167
137static inline void drv_sw_scan_start(struct ieee80211_local *local) 168static inline void drv_sw_scan_start(struct ieee80211_local *local)
138{ 169{
170 might_sleep();
171
139 if (local->ops->sw_scan_start) 172 if (local->ops->sw_scan_start)
140 local->ops->sw_scan_start(&local->hw); 173 local->ops->sw_scan_start(&local->hw);
141 trace_drv_sw_scan_start(local); 174 trace_drv_sw_scan_start(local);
@@ -143,6 +176,8 @@ static inline void drv_sw_scan_start(struct ieee80211_local *local)
143 176
144static inline void drv_sw_scan_complete(struct ieee80211_local *local) 177static inline void drv_sw_scan_complete(struct ieee80211_local *local)
145{ 178{
179 might_sleep();
180
146 if (local->ops->sw_scan_complete) 181 if (local->ops->sw_scan_complete)
147 local->ops->sw_scan_complete(&local->hw); 182 local->ops->sw_scan_complete(&local->hw);
148 trace_drv_sw_scan_complete(local); 183 trace_drv_sw_scan_complete(local);
@@ -153,6 +188,8 @@ static inline int drv_get_stats(struct ieee80211_local *local,
153{ 188{
154 int ret = -EOPNOTSUPP; 189 int ret = -EOPNOTSUPP;
155 190
191 might_sleep();
192
156 if (local->ops->get_stats) 193 if (local->ops->get_stats)
157 ret = local->ops->get_stats(&local->hw, stats); 194 ret = local->ops->get_stats(&local->hw, stats);
158 trace_drv_get_stats(local, stats, ret); 195 trace_drv_get_stats(local, stats, ret);
@@ -172,43 +209,93 @@ static inline int drv_set_rts_threshold(struct ieee80211_local *local,
172 u32 value) 209 u32 value)
173{ 210{
174 int ret = 0; 211 int ret = 0;
212
213 might_sleep();
214
175 if (local->ops->set_rts_threshold) 215 if (local->ops->set_rts_threshold)
176 ret = local->ops->set_rts_threshold(&local->hw, value); 216 ret = local->ops->set_rts_threshold(&local->hw, value);
177 trace_drv_set_rts_threshold(local, value, ret); 217 trace_drv_set_rts_threshold(local, value, ret);
178 return ret; 218 return ret;
179} 219}
180 220
221static inline int drv_set_coverage_class(struct ieee80211_local *local,
222 u8 value)
223{
224 int ret = 0;
225 might_sleep();
226
227 if (local->ops->set_coverage_class)
228 local->ops->set_coverage_class(&local->hw, value);
229 else
230 ret = -EOPNOTSUPP;
231
232 trace_drv_set_coverage_class(local, value, ret);
233 return ret;
234}
235
181static inline void drv_sta_notify(struct ieee80211_local *local, 236static inline void drv_sta_notify(struct ieee80211_local *local,
182 struct ieee80211_vif *vif, 237 struct ieee80211_sub_if_data *sdata,
183 enum sta_notify_cmd cmd, 238 enum sta_notify_cmd cmd,
184 struct ieee80211_sta *sta) 239 struct ieee80211_sta *sta)
185{ 240{
186 if (local->ops->sta_notify) 241 if (local->ops->sta_notify)
187 local->ops->sta_notify(&local->hw, vif, cmd, sta); 242 local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta);
188 trace_drv_sta_notify(local, vif, cmd, sta); 243 trace_drv_sta_notify(local, sdata, cmd, sta);
244}
245
246static inline int drv_sta_add(struct ieee80211_local *local,
247 struct ieee80211_sub_if_data *sdata,
248 struct ieee80211_sta *sta)
249{
250 int ret = 0;
251
252 might_sleep();
253
254 if (local->ops->sta_add)
255 ret = local->ops->sta_add(&local->hw, &sdata->vif, sta);
256 else if (local->ops->sta_notify)
257 local->ops->sta_notify(&local->hw, &sdata->vif,
258 STA_NOTIFY_ADD, sta);
259
260 trace_drv_sta_add(local, sdata, sta, ret);
261
262 return ret;
263}
264
265static inline void drv_sta_remove(struct ieee80211_local *local,
266 struct ieee80211_sub_if_data *sdata,
267 struct ieee80211_sta *sta)
268{
269 might_sleep();
270
271 if (local->ops->sta_remove)
272 local->ops->sta_remove(&local->hw, &sdata->vif, sta);
273 else if (local->ops->sta_notify)
274 local->ops->sta_notify(&local->hw, &sdata->vif,
275 STA_NOTIFY_REMOVE, sta);
276
277 trace_drv_sta_remove(local, sdata, sta);
189} 278}
190 279
191static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue, 280static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
192 const struct ieee80211_tx_queue_params *params) 281 const struct ieee80211_tx_queue_params *params)
193{ 282{
194 int ret = -EOPNOTSUPP; 283 int ret = -EOPNOTSUPP;
284
285 might_sleep();
286
195 if (local->ops->conf_tx) 287 if (local->ops->conf_tx)
196 ret = local->ops->conf_tx(&local->hw, queue, params); 288 ret = local->ops->conf_tx(&local->hw, queue, params);
197 trace_drv_conf_tx(local, queue, params, ret); 289 trace_drv_conf_tx(local, queue, params, ret);
198 return ret; 290 return ret;
199} 291}
200 292
201static inline int drv_get_tx_stats(struct ieee80211_local *local,
202 struct ieee80211_tx_queue_stats *stats)
203{
204 int ret = local->ops->get_tx_stats(&local->hw, stats);
205 trace_drv_get_tx_stats(local, stats, ret);
206 return ret;
207}
208
209static inline u64 drv_get_tsf(struct ieee80211_local *local) 293static inline u64 drv_get_tsf(struct ieee80211_local *local)
210{ 294{
211 u64 ret = -1ULL; 295 u64 ret = -1ULL;
296
297 might_sleep();
298
212 if (local->ops->get_tsf) 299 if (local->ops->get_tsf)
213 ret = local->ops->get_tsf(&local->hw); 300 ret = local->ops->get_tsf(&local->hw);
214 trace_drv_get_tsf(local, ret); 301 trace_drv_get_tsf(local, ret);
@@ -217,6 +304,8 @@ static inline u64 drv_get_tsf(struct ieee80211_local *local)
217 304
218static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf) 305static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
219{ 306{
307 might_sleep();
308
220 if (local->ops->set_tsf) 309 if (local->ops->set_tsf)
221 local->ops->set_tsf(&local->hw, tsf); 310 local->ops->set_tsf(&local->hw, tsf);
222 trace_drv_set_tsf(local, tsf); 311 trace_drv_set_tsf(local, tsf);
@@ -224,6 +313,8 @@ static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
224 313
225static inline void drv_reset_tsf(struct ieee80211_local *local) 314static inline void drv_reset_tsf(struct ieee80211_local *local)
226{ 315{
316 might_sleep();
317
227 if (local->ops->reset_tsf) 318 if (local->ops->reset_tsf)
228 local->ops->reset_tsf(&local->hw); 319 local->ops->reset_tsf(&local->hw);
229 trace_drv_reset_tsf(local); 320 trace_drv_reset_tsf(local);
@@ -232,6 +323,9 @@ static inline void drv_reset_tsf(struct ieee80211_local *local)
232static inline int drv_tx_last_beacon(struct ieee80211_local *local) 323static inline int drv_tx_last_beacon(struct ieee80211_local *local)
233{ 324{
234 int ret = 1; 325 int ret = 1;
326
327 might_sleep();
328
235 if (local->ops->tx_last_beacon) 329 if (local->ops->tx_last_beacon)
236 ret = local->ops->tx_last_beacon(&local->hw); 330 ret = local->ops->tx_last_beacon(&local->hw);
237 trace_drv_tx_last_beacon(local, ret); 331 trace_drv_tx_last_beacon(local, ret);
@@ -239,23 +333,34 @@ static inline int drv_tx_last_beacon(struct ieee80211_local *local)
239} 333}
240 334
241static inline int drv_ampdu_action(struct ieee80211_local *local, 335static inline int drv_ampdu_action(struct ieee80211_local *local,
242 struct ieee80211_vif *vif, 336 struct ieee80211_sub_if_data *sdata,
243 enum ieee80211_ampdu_mlme_action action, 337 enum ieee80211_ampdu_mlme_action action,
244 struct ieee80211_sta *sta, u16 tid, 338 struct ieee80211_sta *sta, u16 tid,
245 u16 *ssn) 339 u16 *ssn)
246{ 340{
247 int ret = -EOPNOTSUPP; 341 int ret = -EOPNOTSUPP;
248 if (local->ops->ampdu_action) 342 if (local->ops->ampdu_action)
249 ret = local->ops->ampdu_action(&local->hw, vif, action, 343 ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
250 sta, tid, ssn); 344 sta, tid, ssn);
251 trace_drv_ampdu_action(local, vif, action, sta, tid, ssn, ret); 345 trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, ret);
252 return ret; 346 return ret;
253} 347}
254 348
255 349
256static inline void drv_rfkill_poll(struct ieee80211_local *local) 350static inline void drv_rfkill_poll(struct ieee80211_local *local)
257{ 351{
352 might_sleep();
353
258 if (local->ops->rfkill_poll) 354 if (local->ops->rfkill_poll)
259 local->ops->rfkill_poll(&local->hw); 355 local->ops->rfkill_poll(&local->hw);
260} 356}
357
358static inline void drv_flush(struct ieee80211_local *local, bool drop)
359{
360 might_sleep();
361
362 trace_drv_flush(local, drop);
363 if (local->ops->flush)
364 local->ops->flush(&local->hw, drop);
365}
261#endif /* __MAC80211_DRIVER_OPS */ 366#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index ee94ea0c67e9..41baf730a5c7 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -25,10 +25,12 @@ static inline void trace_ ## name(proto) {}
25#define STA_PR_FMT " sta:%pM" 25#define STA_PR_FMT " sta:%pM"
26#define STA_PR_ARG __entry->sta_addr 26#define STA_PR_ARG __entry->sta_addr
27 27
28#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, vif) 28#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, sdata) \
29#define VIF_ASSIGN __entry->vif_type = vif ? vif->type : 0; __entry->vif = vif 29 __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
30#define VIF_PR_FMT " vif:%p(%d)" 30#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \
31#define VIF_PR_ARG __entry->vif, __entry->vif_type 31 __assign_str(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
32#define VIF_PR_FMT " vif:%s(%d)"
33#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type
32 34
33TRACE_EVENT(drv_start, 35TRACE_EVENT(drv_start,
34 TP_PROTO(struct ieee80211_local *local, int ret), 36 TP_PROTO(struct ieee80211_local *local, int ret),
@@ -70,11 +72,10 @@ TRACE_EVENT(drv_stop,
70 72
71TRACE_EVENT(drv_add_interface, 73TRACE_EVENT(drv_add_interface,
72 TP_PROTO(struct ieee80211_local *local, 74 TP_PROTO(struct ieee80211_local *local,
73 const u8 *addr, 75 struct ieee80211_sub_if_data *sdata,
74 struct ieee80211_vif *vif,
75 int ret), 76 int ret),
76 77
77 TP_ARGS(local, addr, vif, ret), 78 TP_ARGS(local, sdata, ret),
78 79
79 TP_STRUCT__entry( 80 TP_STRUCT__entry(
80 LOCAL_ENTRY 81 LOCAL_ENTRY
@@ -86,7 +87,7 @@ TRACE_EVENT(drv_add_interface,
86 TP_fast_assign( 87 TP_fast_assign(
87 LOCAL_ASSIGN; 88 LOCAL_ASSIGN;
88 VIF_ASSIGN; 89 VIF_ASSIGN;
89 memcpy(__entry->addr, addr, 6); 90 memcpy(__entry->addr, sdata->vif.addr, 6);
90 __entry->ret = ret; 91 __entry->ret = ret;
91 ), 92 ),
92 93
@@ -97,10 +98,9 @@ TRACE_EVENT(drv_add_interface,
97); 98);
98 99
99TRACE_EVENT(drv_remove_interface, 100TRACE_EVENT(drv_remove_interface,
100 TP_PROTO(struct ieee80211_local *local, 101 TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata),
101 const u8 *addr, struct ieee80211_vif *vif),
102 102
103 TP_ARGS(local, addr, vif), 103 TP_ARGS(local, sdata),
104 104
105 TP_STRUCT__entry( 105 TP_STRUCT__entry(
106 LOCAL_ENTRY 106 LOCAL_ENTRY
@@ -111,7 +111,7 @@ TRACE_EVENT(drv_remove_interface,
111 TP_fast_assign( 111 TP_fast_assign(
112 LOCAL_ASSIGN; 112 LOCAL_ASSIGN;
113 VIF_ASSIGN; 113 VIF_ASSIGN;
114 memcpy(__entry->addr, addr, 6); 114 memcpy(__entry->addr, sdata->vif.addr, 6);
115 ), 115 ),
116 116
117 TP_printk( 117 TP_printk(
@@ -140,6 +140,7 @@ TRACE_EVENT(drv_config,
140 __field(u8, short_frame_max_tx_count) 140 __field(u8, short_frame_max_tx_count)
141 __field(int, center_freq) 141 __field(int, center_freq)
142 __field(int, channel_type) 142 __field(int, channel_type)
143 __field(int, smps)
143 ), 144 ),
144 145
145 TP_fast_assign( 146 TP_fast_assign(
@@ -155,6 +156,7 @@ TRACE_EVENT(drv_config,
155 __entry->short_frame_max_tx_count = local->hw.conf.short_frame_max_tx_count; 156 __entry->short_frame_max_tx_count = local->hw.conf.short_frame_max_tx_count;
156 __entry->center_freq = local->hw.conf.channel->center_freq; 157 __entry->center_freq = local->hw.conf.channel->center_freq;
157 __entry->channel_type = local->hw.conf.channel_type; 158 __entry->channel_type = local->hw.conf.channel_type;
159 __entry->smps = local->hw.conf.smps_mode;
158 ), 160 ),
159 161
160 TP_printk( 162 TP_printk(
@@ -165,11 +167,11 @@ TRACE_EVENT(drv_config,
165 167
166TRACE_EVENT(drv_bss_info_changed, 168TRACE_EVENT(drv_bss_info_changed,
167 TP_PROTO(struct ieee80211_local *local, 169 TP_PROTO(struct ieee80211_local *local,
168 struct ieee80211_vif *vif, 170 struct ieee80211_sub_if_data *sdata,
169 struct ieee80211_bss_conf *info, 171 struct ieee80211_bss_conf *info,
170 u32 changed), 172 u32 changed),
171 173
172 TP_ARGS(local, vif, info, changed), 174 TP_ARGS(local, sdata, info, changed),
173 175
174 TP_STRUCT__entry( 176 TP_STRUCT__entry(
175 LOCAL_ENTRY 177 LOCAL_ENTRY
@@ -293,11 +295,11 @@ TRACE_EVENT(drv_set_tim,
293 295
294TRACE_EVENT(drv_set_key, 296TRACE_EVENT(drv_set_key,
295 TP_PROTO(struct ieee80211_local *local, 297 TP_PROTO(struct ieee80211_local *local,
296 enum set_key_cmd cmd, struct ieee80211_vif *vif, 298 enum set_key_cmd cmd, struct ieee80211_sub_if_data *sdata,
297 struct ieee80211_sta *sta, 299 struct ieee80211_sta *sta,
298 struct ieee80211_key_conf *key, int ret), 300 struct ieee80211_key_conf *key, int ret),
299 301
300 TP_ARGS(local, cmd, vif, sta, key, ret), 302 TP_ARGS(local, cmd, sdata, sta, key, ret),
301 303
302 TP_STRUCT__entry( 304 TP_STRUCT__entry(
303 LOCAL_ENTRY 305 LOCAL_ENTRY
@@ -329,26 +331,29 @@ TRACE_EVENT(drv_set_key,
329 331
330TRACE_EVENT(drv_update_tkip_key, 332TRACE_EVENT(drv_update_tkip_key,
331 TP_PROTO(struct ieee80211_local *local, 333 TP_PROTO(struct ieee80211_local *local,
334 struct ieee80211_sub_if_data *sdata,
332 struct ieee80211_key_conf *conf, 335 struct ieee80211_key_conf *conf,
333 const u8 *address, u32 iv32), 336 struct ieee80211_sta *sta, u32 iv32),
334 337
335 TP_ARGS(local, conf, address, iv32), 338 TP_ARGS(local, sdata, conf, sta, iv32),
336 339
337 TP_STRUCT__entry( 340 TP_STRUCT__entry(
338 LOCAL_ENTRY 341 LOCAL_ENTRY
339 __array(u8, addr, 6) 342 VIF_ENTRY
343 STA_ENTRY
340 __field(u32, iv32) 344 __field(u32, iv32)
341 ), 345 ),
342 346
343 TP_fast_assign( 347 TP_fast_assign(
344 LOCAL_ASSIGN; 348 LOCAL_ASSIGN;
345 memcpy(__entry->addr, address, 6); 349 VIF_ASSIGN;
350 STA_ASSIGN;
346 __entry->iv32 = iv32; 351 __entry->iv32 = iv32;
347 ), 352 ),
348 353
349 TP_printk( 354 TP_printk(
350 LOCAL_PR_FMT " addr:%pM iv32:%#x", 355 LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " iv32:%#x",
351 LOCAL_PR_ARG, __entry->addr, __entry->iv32 356 LOCAL_PR_ARG,VIF_PR_ARG,STA_PR_ARG, __entry->iv32
352 ) 357 )
353); 358);
354 359
@@ -489,13 +494,36 @@ TRACE_EVENT(drv_set_rts_threshold,
489 ) 494 )
490); 495);
491 496
497TRACE_EVENT(drv_set_coverage_class,
498 TP_PROTO(struct ieee80211_local *local, u8 value, int ret),
499
500 TP_ARGS(local, value, ret),
501
502 TP_STRUCT__entry(
503 LOCAL_ENTRY
504 __field(u8, value)
505 __field(int, ret)
506 ),
507
508 TP_fast_assign(
509 LOCAL_ASSIGN;
510 __entry->ret = ret;
511 __entry->value = value;
512 ),
513
514 TP_printk(
515 LOCAL_PR_FMT " value:%d ret:%d",
516 LOCAL_PR_ARG, __entry->value, __entry->ret
517 )
518);
519
492TRACE_EVENT(drv_sta_notify, 520TRACE_EVENT(drv_sta_notify,
493 TP_PROTO(struct ieee80211_local *local, 521 TP_PROTO(struct ieee80211_local *local,
494 struct ieee80211_vif *vif, 522 struct ieee80211_sub_if_data *sdata,
495 enum sta_notify_cmd cmd, 523 enum sta_notify_cmd cmd,
496 struct ieee80211_sta *sta), 524 struct ieee80211_sta *sta),
497 525
498 TP_ARGS(local, vif, cmd, sta), 526 TP_ARGS(local, sdata, cmd, sta),
499 527
500 TP_STRUCT__entry( 528 TP_STRUCT__entry(
501 LOCAL_ENTRY 529 LOCAL_ENTRY
@@ -517,59 +545,88 @@ TRACE_EVENT(drv_sta_notify,
517 ) 545 )
518); 546);
519 547
520TRACE_EVENT(drv_conf_tx, 548TRACE_EVENT(drv_sta_add,
521 TP_PROTO(struct ieee80211_local *local, u16 queue, 549 TP_PROTO(struct ieee80211_local *local,
522 const struct ieee80211_tx_queue_params *params, 550 struct ieee80211_sub_if_data *sdata,
523 int ret), 551 struct ieee80211_sta *sta, int ret),
524 552
525 TP_ARGS(local, queue, params, ret), 553 TP_ARGS(local, sdata, sta, ret),
526 554
527 TP_STRUCT__entry( 555 TP_STRUCT__entry(
528 LOCAL_ENTRY 556 LOCAL_ENTRY
529 __field(u16, queue) 557 VIF_ENTRY
530 __field(u16, txop) 558 STA_ENTRY
531 __field(u16, cw_min)
532 __field(u16, cw_max)
533 __field(u8, aifs)
534 __field(int, ret) 559 __field(int, ret)
535 ), 560 ),
536 561
537 TP_fast_assign( 562 TP_fast_assign(
538 LOCAL_ASSIGN; 563 LOCAL_ASSIGN;
539 __entry->queue = queue; 564 VIF_ASSIGN;
565 STA_ASSIGN;
540 __entry->ret = ret; 566 __entry->ret = ret;
541 __entry->txop = params->txop;
542 __entry->cw_max = params->cw_max;
543 __entry->cw_min = params->cw_min;
544 __entry->aifs = params->aifs;
545 ), 567 ),
546 568
547 TP_printk( 569 TP_printk(
548 LOCAL_PR_FMT " queue:%d ret:%d", 570 LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ret:%d",
549 LOCAL_PR_ARG, __entry->queue, __entry->ret 571 LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret
550 ) 572 )
551); 573);
552 574
553TRACE_EVENT(drv_get_tx_stats, 575TRACE_EVENT(drv_sta_remove,
554 TP_PROTO(struct ieee80211_local *local, 576 TP_PROTO(struct ieee80211_local *local,
555 struct ieee80211_tx_queue_stats *stats, 577 struct ieee80211_sub_if_data *sdata,
578 struct ieee80211_sta *sta),
579
580 TP_ARGS(local, sdata, sta),
581
582 TP_STRUCT__entry(
583 LOCAL_ENTRY
584 VIF_ENTRY
585 STA_ENTRY
586 ),
587
588 TP_fast_assign(
589 LOCAL_ASSIGN;
590 VIF_ASSIGN;
591 STA_ASSIGN;
592 ),
593
594 TP_printk(
595 LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT,
596 LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG
597 )
598);
599
600TRACE_EVENT(drv_conf_tx,
601 TP_PROTO(struct ieee80211_local *local, u16 queue,
602 const struct ieee80211_tx_queue_params *params,
556 int ret), 603 int ret),
557 604
558 TP_ARGS(local, stats, ret), 605 TP_ARGS(local, queue, params, ret),
559 606
560 TP_STRUCT__entry( 607 TP_STRUCT__entry(
561 LOCAL_ENTRY 608 LOCAL_ENTRY
609 __field(u16, queue)
610 __field(u16, txop)
611 __field(u16, cw_min)
612 __field(u16, cw_max)
613 __field(u8, aifs)
562 __field(int, ret) 614 __field(int, ret)
563 ), 615 ),
564 616
565 TP_fast_assign( 617 TP_fast_assign(
566 LOCAL_ASSIGN; 618 LOCAL_ASSIGN;
619 __entry->queue = queue;
567 __entry->ret = ret; 620 __entry->ret = ret;
621 __entry->txop = params->txop;
622 __entry->cw_max = params->cw_max;
623 __entry->cw_min = params->cw_min;
624 __entry->aifs = params->aifs;
568 ), 625 ),
569 626
570 TP_printk( 627 TP_printk(
571 LOCAL_PR_FMT " ret:%d", 628 LOCAL_PR_FMT " queue:%d ret:%d",
572 LOCAL_PR_ARG, __entry->ret 629 LOCAL_PR_ARG, __entry->queue, __entry->ret
573 ) 630 )
574); 631);
575 632
@@ -656,12 +713,12 @@ TRACE_EVENT(drv_tx_last_beacon,
656 713
657TRACE_EVENT(drv_ampdu_action, 714TRACE_EVENT(drv_ampdu_action,
658 TP_PROTO(struct ieee80211_local *local, 715 TP_PROTO(struct ieee80211_local *local,
659 struct ieee80211_vif *vif, 716 struct ieee80211_sub_if_data *sdata,
660 enum ieee80211_ampdu_mlme_action action, 717 enum ieee80211_ampdu_mlme_action action,
661 struct ieee80211_sta *sta, u16 tid, 718 struct ieee80211_sta *sta, u16 tid,
662 u16 *ssn, int ret), 719 u16 *ssn, int ret),
663 720
664 TP_ARGS(local, vif, action, sta, tid, ssn, ret), 721 TP_ARGS(local, sdata, action, sta, tid, ssn, ret),
665 722
666 TP_STRUCT__entry( 723 TP_STRUCT__entry(
667 LOCAL_ENTRY 724 LOCAL_ENTRY
@@ -680,7 +737,7 @@ TRACE_EVENT(drv_ampdu_action,
680 __entry->ret = ret; 737 __entry->ret = ret;
681 __entry->action = action; 738 __entry->action = action;
682 __entry->tid = tid; 739 __entry->tid = tid;
683 __entry->ssn = *ssn; 740 __entry->ssn = ssn ? *ssn : 0;
684 ), 741 ),
685 742
686 TP_printk( 743 TP_printk(
@@ -688,6 +745,27 @@ TRACE_EVENT(drv_ampdu_action,
688 LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret 745 LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret
689 ) 746 )
690); 747);
748
749TRACE_EVENT(drv_flush,
750 TP_PROTO(struct ieee80211_local *local, bool drop),
751
752 TP_ARGS(local, drop),
753
754 TP_STRUCT__entry(
755 LOCAL_ENTRY
756 __field(bool, drop)
757 ),
758
759 TP_fast_assign(
760 LOCAL_ASSIGN;
761 __entry->drop = drop;
762 ),
763
764 TP_printk(
765 LOCAL_PR_FMT " drop:%d",
766 LOCAL_PR_ARG, __entry->drop
767 )
768);
691#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */ 769#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
692 770
693#undef TRACE_INCLUDE_PATH 771#undef TRACE_INCLUDE_PATH
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 3787455fb696..bb677a73b7c9 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -34,9 +34,28 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
34 34
35 ht_cap->ht_supported = true; 35 ht_cap->ht_supported = true;
36 36
37 ht_cap->cap = le16_to_cpu(ht_cap_ie->cap_info) & sband->ht_cap.cap; 37 /*
38 ht_cap->cap &= ~IEEE80211_HT_CAP_SM_PS; 38 * The bits listed in this expression should be
39 ht_cap->cap |= sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS; 39 * the same for the peer and us, if the station
40 * advertises more then we can't use those thus
41 * we mask them out.
42 */
43 ht_cap->cap = le16_to_cpu(ht_cap_ie->cap_info) &
44 (sband->ht_cap.cap |
45 ~(IEEE80211_HT_CAP_LDPC_CODING |
46 IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
47 IEEE80211_HT_CAP_GRN_FLD |
48 IEEE80211_HT_CAP_SGI_20 |
49 IEEE80211_HT_CAP_SGI_40 |
50 IEEE80211_HT_CAP_DSSSCCK40));
51 /*
52 * The STBC bits are asymmetric -- if we don't have
53 * TX then mask out the peer's RX and vice versa.
54 */
55 if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC))
56 ht_cap->cap &= ~IEEE80211_HT_CAP_RX_STBC;
57 if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC))
58 ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;
40 59
41 ampdu_info = ht_cap_ie->ampdu_params_info; 60 ampdu_info = ht_cap_ie->ampdu_params_info;
42 ht_cap->ampdu_factor = 61 ht_cap->ampdu_factor =
@@ -106,7 +125,7 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
106 125
107 if (!skb) { 126 if (!skb) {
108 printk(KERN_ERR "%s: failed to allocate buffer " 127 printk(KERN_ERR "%s: failed to allocate buffer "
109 "for delba frame\n", sdata->dev->name); 128 "for delba frame\n", sdata->name);
110 return; 129 return;
111 } 130 }
112 131
@@ -114,10 +133,10 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
114 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 133 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
115 memset(mgmt, 0, 24); 134 memset(mgmt, 0, 24);
116 memcpy(mgmt->da, da, ETH_ALEN); 135 memcpy(mgmt->da, da, ETH_ALEN);
117 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 136 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
118 if (sdata->vif.type == NL80211_IFTYPE_AP || 137 if (sdata->vif.type == NL80211_IFTYPE_AP ||
119 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 138 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
120 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); 139 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
121 else if (sdata->vif.type == NL80211_IFTYPE_STATION) 140 else if (sdata->vif.type == NL80211_IFTYPE_STATION)
122 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); 141 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
123 142
@@ -166,3 +185,50 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
166 spin_unlock_bh(&sta->lock); 185 spin_unlock_bh(&sta->lock);
167 } 186 }
168} 187}
188
189int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
190 enum ieee80211_smps_mode smps, const u8 *da,
191 const u8 *bssid)
192{
193 struct ieee80211_local *local = sdata->local;
194 struct sk_buff *skb;
195 struct ieee80211_mgmt *action_frame;
196
197 /* 27 = header + category + action + smps mode */
198 skb = dev_alloc_skb(27 + local->hw.extra_tx_headroom);
199 if (!skb)
200 return -ENOMEM;
201
202 skb_reserve(skb, local->hw.extra_tx_headroom);
203 action_frame = (void *)skb_put(skb, 27);
204 memcpy(action_frame->da, da, ETH_ALEN);
205 memcpy(action_frame->sa, sdata->dev->dev_addr, ETH_ALEN);
206 memcpy(action_frame->bssid, bssid, ETH_ALEN);
207 action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
208 IEEE80211_STYPE_ACTION);
209 action_frame->u.action.category = WLAN_CATEGORY_HT;
210 action_frame->u.action.u.ht_smps.action = WLAN_HT_ACTION_SMPS;
211 switch (smps) {
212 case IEEE80211_SMPS_AUTOMATIC:
213 case IEEE80211_SMPS_NUM_MODES:
214 WARN_ON(1);
215 case IEEE80211_SMPS_OFF:
216 action_frame->u.action.u.ht_smps.smps_control =
217 WLAN_HT_SMPS_CONTROL_DISABLED;
218 break;
219 case IEEE80211_SMPS_STATIC:
220 action_frame->u.action.u.ht_smps.smps_control =
221 WLAN_HT_SMPS_CONTROL_STATIC;
222 break;
223 case IEEE80211_SMPS_DYNAMIC:
224 action_frame->u.action.u.ht_smps.smps_control =
225 WLAN_HT_SMPS_CONTROL_DYNAMIC;
226 break;
227 }
228
229 /* we'll do more on status of this frame */
230 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
231 ieee80211_tx_skb(sdata, skb);
232
233 return 0;
234}
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 10d13856f86c..f3e942486749 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -117,7 +117,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
117 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 117 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
118 IEEE80211_STYPE_PROBE_RESP); 118 IEEE80211_STYPE_PROBE_RESP);
119 memset(mgmt->da, 0xff, ETH_ALEN); 119 memset(mgmt->da, 0xff, ETH_ALEN);
120 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 120 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
121 memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN); 121 memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN);
122 mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int); 122 mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int);
123 mgmt->u.beacon.timestamp = cpu_to_le64(tsf); 123 mgmt->u.beacon.timestamp = cpu_to_le64(tsf);
@@ -187,15 +187,17 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
187static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, 187static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
188 struct ieee80211_bss *bss) 188 struct ieee80211_bss *bss)
189{ 189{
190 struct cfg80211_bss *cbss =
191 container_of((void *)bss, struct cfg80211_bss, priv);
190 struct ieee80211_supported_band *sband; 192 struct ieee80211_supported_band *sband;
191 u32 basic_rates; 193 u32 basic_rates;
192 int i, j; 194 int i, j;
193 u16 beacon_int = bss->cbss.beacon_interval; 195 u16 beacon_int = cbss->beacon_interval;
194 196
195 if (beacon_int < 10) 197 if (beacon_int < 10)
196 beacon_int = 10; 198 beacon_int = 10;
197 199
198 sband = sdata->local->hw.wiphy->bands[bss->cbss.channel->band]; 200 sband = sdata->local->hw.wiphy->bands[cbss->channel->band];
199 201
200 basic_rates = 0; 202 basic_rates = 0;
201 203
@@ -212,12 +214,12 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
212 } 214 }
213 } 215 }
214 216
215 __ieee80211_sta_join_ibss(sdata, bss->cbss.bssid, 217 __ieee80211_sta_join_ibss(sdata, cbss->bssid,
216 beacon_int, 218 beacon_int,
217 bss->cbss.channel, 219 cbss->channel,
218 basic_rates, 220 basic_rates,
219 bss->cbss.capability, 221 cbss->capability,
220 bss->cbss.tsf); 222 cbss->tsf);
221} 223}
222 224
223static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, 225static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
@@ -229,6 +231,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
229{ 231{
230 struct ieee80211_local *local = sdata->local; 232 struct ieee80211_local *local = sdata->local;
231 int freq; 233 int freq;
234 struct cfg80211_bss *cbss;
232 struct ieee80211_bss *bss; 235 struct ieee80211_bss *bss;
233 struct sta_info *sta; 236 struct sta_info *sta;
234 struct ieee80211_channel *channel; 237 struct ieee80211_channel *channel;
@@ -252,7 +255,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
252 255
253 rcu_read_lock(); 256 rcu_read_lock();
254 257
255 sta = sta_info_get(local, mgmt->sa); 258 sta = sta_info_get(sdata, mgmt->sa);
256 if (sta) { 259 if (sta) {
257 u32 prev_rates; 260 u32 prev_rates;
258 261
@@ -266,16 +269,18 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
266 printk(KERN_DEBUG "%s: updated supp_rates set " 269 printk(KERN_DEBUG "%s: updated supp_rates set "
267 "for %pM based on beacon info (0x%llx | " 270 "for %pM based on beacon info (0x%llx | "
268 "0x%llx -> 0x%llx)\n", 271 "0x%llx -> 0x%llx)\n",
269 sdata->dev->name, 272 sdata->name,
270 sta->sta.addr, 273 sta->sta.addr,
271 (unsigned long long) prev_rates, 274 (unsigned long long) prev_rates,
272 (unsigned long long) supp_rates, 275 (unsigned long long) supp_rates,
273 (unsigned long long) sta->sta.supp_rates[band]); 276 (unsigned long long) sta->sta.supp_rates[band]);
274#endif 277#endif
275 } else 278 rcu_read_unlock();
276 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates); 279 } else {
277 280 rcu_read_unlock();
278 rcu_read_unlock(); 281 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
282 supp_rates, GFP_KERNEL);
283 }
279 } 284 }
280 285
281 bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems, 286 bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems,
@@ -283,25 +288,23 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
283 if (!bss) 288 if (!bss)
284 return; 289 return;
285 290
291 cbss = container_of((void *)bss, struct cfg80211_bss, priv);
292
286 /* was just updated in ieee80211_bss_info_update */ 293 /* was just updated in ieee80211_bss_info_update */
287 beacon_timestamp = bss->cbss.tsf; 294 beacon_timestamp = cbss->tsf;
288 295
289 /* check if we need to merge IBSS */ 296 /* check if we need to merge IBSS */
290 297
291 /* merge only on beacons (???) */
292 if (!beacon)
293 goto put_bss;
294
295 /* we use a fixed BSSID */ 298 /* we use a fixed BSSID */
296 if (sdata->u.ibss.bssid) 299 if (sdata->u.ibss.fixed_bssid)
297 goto put_bss; 300 goto put_bss;
298 301
299 /* not an IBSS */ 302 /* not an IBSS */
300 if (!(bss->cbss.capability & WLAN_CAPABILITY_IBSS)) 303 if (!(cbss->capability & WLAN_CAPABILITY_IBSS))
301 goto put_bss; 304 goto put_bss;
302 305
303 /* different channel */ 306 /* different channel */
304 if (bss->cbss.channel != local->oper_channel) 307 if (cbss->channel != local->oper_channel)
305 goto put_bss; 308 goto put_bss;
306 309
307 /* different SSID */ 310 /* different SSID */
@@ -311,7 +314,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
311 goto put_bss; 314 goto put_bss;
312 315
313 /* same BSSID */ 316 /* same BSSID */
314 if (memcmp(bss->cbss.bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0) 317 if (memcmp(cbss->bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0)
315 goto put_bss; 318 goto put_bss;
316 319
317 if (rx_status->flag & RX_FLAG_TSFT) { 320 if (rx_status->flag & RX_FLAG_TSFT) {
@@ -364,10 +367,11 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
364#ifdef CONFIG_MAC80211_IBSS_DEBUG 367#ifdef CONFIG_MAC80211_IBSS_DEBUG
365 printk(KERN_DEBUG "%s: beacon TSF higher than " 368 printk(KERN_DEBUG "%s: beacon TSF higher than "
366 "local TSF - IBSS merge with BSSID %pM\n", 369 "local TSF - IBSS merge with BSSID %pM\n",
367 sdata->dev->name, mgmt->bssid); 370 sdata->name, mgmt->bssid);
368#endif 371#endif
369 ieee80211_sta_join_ibss(sdata, bss); 372 ieee80211_sta_join_ibss(sdata, bss);
370 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates); 373 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
374 supp_rates, GFP_KERNEL);
371 } 375 }
372 376
373 put_bss: 377 put_bss:
@@ -380,8 +384,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
380 * must be callable in atomic context. 384 * must be callable in atomic context.
381 */ 385 */
382struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, 386struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
383 u8 *bssid,u8 *addr, u32 supp_rates) 387 u8 *bssid,u8 *addr, u32 supp_rates,
388 gfp_t gfp)
384{ 389{
390 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
385 struct ieee80211_local *local = sdata->local; 391 struct ieee80211_local *local = sdata->local;
386 struct sta_info *sta; 392 struct sta_info *sta;
387 int band = local->hw.conf.channel->band; 393 int band = local->hw.conf.channel->band;
@@ -393,19 +399,22 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
393 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { 399 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
394 if (net_ratelimit()) 400 if (net_ratelimit())
395 printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n", 401 printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n",
396 sdata->dev->name, addr); 402 sdata->name, addr);
397 return NULL; 403 return NULL;
398 } 404 }
399 405
406 if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH)
407 return NULL;
408
400 if (compare_ether_addr(bssid, sdata->u.ibss.bssid)) 409 if (compare_ether_addr(bssid, sdata->u.ibss.bssid))
401 return NULL; 410 return NULL;
402 411
403#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 412#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
404 printk(KERN_DEBUG "%s: Adding new IBSS station %pM (dev=%s)\n", 413 printk(KERN_DEBUG "%s: Adding new IBSS station %pM (dev=%s)\n",
405 wiphy_name(local->hw.wiphy), addr, sdata->dev->name); 414 wiphy_name(local->hw.wiphy), addr, sdata->name);
406#endif 415#endif
407 416
408 sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); 417 sta = sta_info_alloc(sdata, addr, gfp);
409 if (!sta) 418 if (!sta)
410 return NULL; 419 return NULL;
411 420
@@ -417,9 +426,9 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
417 426
418 rate_control_rate_init(sta); 427 rate_control_rate_init(sta);
419 428
429 /* If it fails, maybe we raced another insertion? */
420 if (sta_info_insert(sta)) 430 if (sta_info_insert(sta))
421 return NULL; 431 return sta_info_get(sdata, addr);
422
423 return sta; 432 return sta;
424} 433}
425 434
@@ -445,6 +454,9 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
445 return active; 454 return active;
446} 455}
447 456
457/*
458 * This function is called with state == IEEE80211_IBSS_MLME_JOINED
459 */
448 460
449static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata) 461static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
450{ 462{
@@ -466,7 +478,7 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
466 return; 478 return;
467 479
468 printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other " 480 printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other "
469 "IBSS networks with same SSID (merge)\n", sdata->dev->name); 481 "IBSS networks with same SSID (merge)\n", sdata->name);
470 482
471 ieee80211_request_internal_scan(sdata, ifibss->ssid, ifibss->ssid_len); 483 ieee80211_request_internal_scan(sdata, ifibss->ssid, ifibss->ssid_len);
472} 484}
@@ -488,13 +500,13 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
488 * random number generator get different BSSID. */ 500 * random number generator get different BSSID. */
489 get_random_bytes(bssid, ETH_ALEN); 501 get_random_bytes(bssid, ETH_ALEN);
490 for (i = 0; i < ETH_ALEN; i++) 502 for (i = 0; i < ETH_ALEN; i++)
491 bssid[i] ^= sdata->dev->dev_addr[i]; 503 bssid[i] ^= sdata->vif.addr[i];
492 bssid[0] &= ~0x01; 504 bssid[0] &= ~0x01;
493 bssid[0] |= 0x02; 505 bssid[0] |= 0x02;
494 } 506 }
495 507
496 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n", 508 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n",
497 sdata->dev->name, bssid); 509 sdata->name, bssid);
498 510
499 sband = local->hw.wiphy->bands[ifibss->channel->band]; 511 sband = local->hw.wiphy->bands[ifibss->channel->band];
500 512
@@ -510,11 +522,15 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
510 capability, 0); 522 capability, 0);
511} 523}
512 524
525/*
526 * This function is called with state == IEEE80211_IBSS_MLME_SEARCH
527 */
528
513static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata) 529static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
514{ 530{
515 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 531 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
516 struct ieee80211_local *local = sdata->local; 532 struct ieee80211_local *local = sdata->local;
517 struct ieee80211_bss *bss; 533 struct cfg80211_bss *cbss;
518 struct ieee80211_channel *chan = NULL; 534 struct ieee80211_channel *chan = NULL;
519 const u8 *bssid = NULL; 535 const u8 *bssid = NULL;
520 int active_ibss; 536 int active_ibss;
@@ -523,7 +539,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
523 active_ibss = ieee80211_sta_active_ibss(sdata); 539 active_ibss = ieee80211_sta_active_ibss(sdata);
524#ifdef CONFIG_MAC80211_IBSS_DEBUG 540#ifdef CONFIG_MAC80211_IBSS_DEBUG
525 printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n", 541 printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n",
526 sdata->dev->name, active_ibss); 542 sdata->name, active_ibss);
527#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 543#endif /* CONFIG_MAC80211_IBSS_DEBUG */
528 544
529 if (active_ibss) 545 if (active_ibss)
@@ -538,21 +554,23 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
538 chan = ifibss->channel; 554 chan = ifibss->channel;
539 if (!is_zero_ether_addr(ifibss->bssid)) 555 if (!is_zero_ether_addr(ifibss->bssid))
540 bssid = ifibss->bssid; 556 bssid = ifibss->bssid;
541 bss = (void *)cfg80211_get_bss(local->hw.wiphy, chan, bssid, 557 cbss = cfg80211_get_bss(local->hw.wiphy, chan, bssid,
542 ifibss->ssid, ifibss->ssid_len, 558 ifibss->ssid, ifibss->ssid_len,
543 WLAN_CAPABILITY_IBSS | 559 WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_PRIVACY,
544 WLAN_CAPABILITY_PRIVACY, 560 capability);
545 capability); 561
562 if (cbss) {
563 struct ieee80211_bss *bss;
546 564
547 if (bss) { 565 bss = (void *)cbss->priv;
548#ifdef CONFIG_MAC80211_IBSS_DEBUG 566#ifdef CONFIG_MAC80211_IBSS_DEBUG
549 printk(KERN_DEBUG " sta_find_ibss: selected %pM current " 567 printk(KERN_DEBUG " sta_find_ibss: selected %pM current "
550 "%pM\n", bss->cbss.bssid, ifibss->bssid); 568 "%pM\n", cbss->bssid, ifibss->bssid);
551#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 569#endif /* CONFIG_MAC80211_IBSS_DEBUG */
552 570
553 printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM" 571 printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM"
554 " based on configured SSID\n", 572 " based on configured SSID\n",
555 sdata->dev->name, bss->cbss.bssid); 573 sdata->name, cbss->bssid);
556 574
557 ieee80211_sta_join_ibss(sdata, bss); 575 ieee80211_sta_join_ibss(sdata, bss);
558 ieee80211_rx_bss_put(local, bss); 576 ieee80211_rx_bss_put(local, bss);
@@ -564,18 +582,14 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
564#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 582#endif /* CONFIG_MAC80211_IBSS_DEBUG */
565 583
566 /* Selected IBSS not found in current scan results - try to scan */ 584 /* Selected IBSS not found in current scan results - try to scan */
567 if (ifibss->state == IEEE80211_IBSS_MLME_JOINED && 585 if (time_after(jiffies, ifibss->last_scan_completed +
568 !ieee80211_sta_active_ibss(sdata)) {
569 mod_timer(&ifibss->timer,
570 round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
571 } else if (time_after(jiffies, ifibss->last_scan_completed +
572 IEEE80211_SCAN_INTERVAL)) { 586 IEEE80211_SCAN_INTERVAL)) {
573 printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to " 587 printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to "
574 "join\n", sdata->dev->name); 588 "join\n", sdata->name);
575 589
576 ieee80211_request_internal_scan(sdata, ifibss->ssid, 590 ieee80211_request_internal_scan(sdata, ifibss->ssid,
577 ifibss->ssid_len); 591 ifibss->ssid_len);
578 } else if (ifibss->state != IEEE80211_IBSS_MLME_JOINED) { 592 } else {
579 int interval = IEEE80211_SCAN_INTERVAL; 593 int interval = IEEE80211_SCAN_INTERVAL;
580 594
581 if (time_after(jiffies, ifibss->ibss_join_req + 595 if (time_after(jiffies, ifibss->ibss_join_req +
@@ -585,7 +599,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
585 return; 599 return;
586 } 600 }
587 printk(KERN_DEBUG "%s: IBSS not allowed on" 601 printk(KERN_DEBUG "%s: IBSS not allowed on"
588 " %d MHz\n", sdata->dev->name, 602 " %d MHz\n", sdata->name,
589 local->hw.conf.channel->center_freq); 603 local->hw.conf.channel->center_freq);
590 604
591 /* No IBSS found - decrease scan interval and continue 605 /* No IBSS found - decrease scan interval and continue
@@ -593,7 +607,6 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
593 interval = IEEE80211_SCAN_INTERVAL_SLOW; 607 interval = IEEE80211_SCAN_INTERVAL_SLOW;
594 } 608 }
595 609
596 ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
597 mod_timer(&ifibss->timer, 610 mod_timer(&ifibss->timer,
598 round_jiffies(jiffies + interval)); 611 round_jiffies(jiffies + interval));
599 } 612 }
@@ -619,7 +632,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
619#ifdef CONFIG_MAC80211_IBSS_DEBUG 632#ifdef CONFIG_MAC80211_IBSS_DEBUG
620 printk(KERN_DEBUG "%s: RX ProbeReq SA=%pM DA=%pM BSSID=%pM" 633 printk(KERN_DEBUG "%s: RX ProbeReq SA=%pM DA=%pM BSSID=%pM"
621 " (tx_last_beacon=%d)\n", 634 " (tx_last_beacon=%d)\n",
622 sdata->dev->name, mgmt->sa, mgmt->da, 635 sdata->name, mgmt->sa, mgmt->da,
623 mgmt->bssid, tx_last_beacon); 636 mgmt->bssid, tx_last_beacon);
624#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 637#endif /* CONFIG_MAC80211_IBSS_DEBUG */
625 638
@@ -637,13 +650,13 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
637#ifdef CONFIG_MAC80211_IBSS_DEBUG 650#ifdef CONFIG_MAC80211_IBSS_DEBUG
638 printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq " 651 printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq "
639 "from %pM\n", 652 "from %pM\n",
640 sdata->dev->name, mgmt->sa); 653 sdata->name, mgmt->sa);
641#endif 654#endif
642 return; 655 return;
643 } 656 }
644 if (pos[1] != 0 && 657 if (pos[1] != 0 &&
645 (pos[1] != ifibss->ssid_len || 658 (pos[1] != ifibss->ssid_len ||
646 !memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) { 659 memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) {
647 /* Ignore ProbeReq for foreign SSID */ 660 /* Ignore ProbeReq for foreign SSID */
648 return; 661 return;
649 } 662 }
@@ -657,7 +670,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
657 memcpy(resp->da, mgmt->sa, ETH_ALEN); 670 memcpy(resp->da, mgmt->sa, ETH_ALEN);
658#ifdef CONFIG_MAC80211_IBSS_DEBUG 671#ifdef CONFIG_MAC80211_IBSS_DEBUG
659 printk(KERN_DEBUG "%s: Sending ProbeResp to %pM\n", 672 printk(KERN_DEBUG "%s: Sending ProbeResp to %pM\n",
660 sdata->dev->name, resp->da); 673 sdata->name, resp->da);
661#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 674#endif /* CONFIG_MAC80211_IBSS_DEBUG */
662 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 675 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
663 ieee80211_tx_skb(sdata, skb); 676 ieee80211_tx_skb(sdata, skb);
@@ -671,7 +684,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
671 size_t baselen; 684 size_t baselen;
672 struct ieee802_11_elems elems; 685 struct ieee802_11_elems elems;
673 686
674 if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) 687 if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
675 return; /* ignore ProbeResp to foreign address */ 688 return; /* ignore ProbeResp to foreign address */
676 689
677 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; 690 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -744,7 +757,7 @@ static void ieee80211_ibss_work(struct work_struct *work)
744 if (WARN_ON(local->suspended)) 757 if (WARN_ON(local->suspended))
745 return; 758 return;
746 759
747 if (!netif_running(sdata->dev)) 760 if (!ieee80211_sdata_running(sdata))
748 return; 761 return;
749 762
750 if (local->scanning) 763 if (local->scanning)
@@ -827,7 +840,7 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
827 840
828 mutex_lock(&local->iflist_mtx); 841 mutex_lock(&local->iflist_mtx);
829 list_for_each_entry(sdata, &local->interfaces, list) { 842 list_for_each_entry(sdata, &local->interfaces, list) {
830 if (!netif_running(sdata->dev)) 843 if (!ieee80211_sdata_running(sdata))
831 continue; 844 continue;
832 if (sdata->vif.type != NL80211_IFTYPE_ADHOC) 845 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
833 continue; 846 continue;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 91dc8636d644..241533e1bc03 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -2,7 +2,7 @@
2 * Copyright 2002-2005, Instant802 Networks, Inc. 2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005, Devicescape Software, Inc. 3 * Copyright 2005, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net> 5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -58,6 +58,15 @@ struct ieee80211_local;
58 58
59#define TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024)) 59#define TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024))
60 60
61#define IEEE80211_DEFAULT_UAPSD_QUEUES \
62 (IEEE80211_WMM_IE_STA_QOSINFO_AC_BK | \
63 IEEE80211_WMM_IE_STA_QOSINFO_AC_BE | \
64 IEEE80211_WMM_IE_STA_QOSINFO_AC_VI | \
65 IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
66
67#define IEEE80211_DEFAULT_MAX_SP_LEN \
68 IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
69
61struct ieee80211_fragment_entry { 70struct ieee80211_fragment_entry {
62 unsigned long first_frag_time; 71 unsigned long first_frag_time;
63 unsigned int seq; 72 unsigned int seq;
@@ -71,9 +80,6 @@ struct ieee80211_fragment_entry {
71 80
72 81
73struct ieee80211_bss { 82struct ieee80211_bss {
74 /* Yes, this is a hack */
75 struct cfg80211_bss cbss;
76
77 /* don't want to look up all the time */ 83 /* don't want to look up all the time */
78 size_t ssid_len; 84 size_t ssid_len;
79 u8 ssid[IEEE80211_MAX_SSID_LEN]; 85 u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -81,6 +87,7 @@ struct ieee80211_bss {
81 u8 dtim_period; 87 u8 dtim_period;
82 88
83 bool wmm_used; 89 bool wmm_used;
90 bool uapsd_supported;
84 91
85 unsigned long last_probe_resp; 92 unsigned long last_probe_resp;
86 93
@@ -140,7 +147,6 @@ typedef unsigned __bitwise__ ieee80211_tx_result;
140 147
141struct ieee80211_tx_data { 148struct ieee80211_tx_data {
142 struct sk_buff *skb; 149 struct sk_buff *skb;
143 struct net_device *dev;
144 struct ieee80211_local *local; 150 struct ieee80211_local *local;
145 struct ieee80211_sub_if_data *sdata; 151 struct ieee80211_sub_if_data *sdata;
146 struct sta_info *sta; 152 struct sta_info *sta;
@@ -228,31 +234,77 @@ struct mesh_preq_queue {
228 u8 flags; 234 u8 flags;
229}; 235};
230 236
231enum ieee80211_mgd_state { 237enum ieee80211_work_type {
232 IEEE80211_MGD_STATE_IDLE, 238 IEEE80211_WORK_ABORT,
233 IEEE80211_MGD_STATE_PROBE, 239 IEEE80211_WORK_DIRECT_PROBE,
234 IEEE80211_MGD_STATE_AUTH, 240 IEEE80211_WORK_AUTH,
235 IEEE80211_MGD_STATE_ASSOC, 241 IEEE80211_WORK_ASSOC,
242 IEEE80211_WORK_REMAIN_ON_CHANNEL,
236}; 243};
237 244
238struct ieee80211_mgd_work { 245/**
246 * enum work_done_result - indicates what to do after work was done
247 *
248 * @WORK_DONE_DESTROY: This work item is no longer needed, destroy.
249 * @WORK_DONE_REQUEUE: This work item was reset to be reused, and
250 * should be requeued.
251 */
252enum work_done_result {
253 WORK_DONE_DESTROY,
254 WORK_DONE_REQUEUE,
255};
256
257struct ieee80211_work {
239 struct list_head list; 258 struct list_head list;
240 struct ieee80211_bss *bss; 259
241 int ie_len; 260 struct rcu_head rcu_head;
242 u8 prev_bssid[ETH_ALEN]; 261
243 u8 ssid[IEEE80211_MAX_SSID_LEN]; 262 struct ieee80211_sub_if_data *sdata;
244 u8 ssid_len; 263
264 enum work_done_result (*done)(struct ieee80211_work *wk,
265 struct sk_buff *skb);
266
267 struct ieee80211_channel *chan;
268 enum nl80211_channel_type chan_type;
269
245 unsigned long timeout; 270 unsigned long timeout;
246 enum ieee80211_mgd_state state; 271 enum ieee80211_work_type type;
247 u16 auth_alg, auth_transaction; 272
273 u8 filter_ta[ETH_ALEN];
248 274
249 int tries; 275 bool started;
250 276
251 u8 key[WLAN_KEY_LEN_WEP104]; 277 union {
252 u8 key_len, key_idx; 278 struct {
279 int tries;
280 u16 algorithm, transaction;
281 u8 ssid[IEEE80211_MAX_SSID_LEN];
282 u8 ssid_len;
283 u8 key[WLAN_KEY_LEN_WEP104];
284 u8 key_len, key_idx;
285 bool privacy;
286 } probe_auth;
287 struct {
288 struct cfg80211_bss *bss;
289 const u8 *supp_rates;
290 const u8 *ht_information_ie;
291 enum ieee80211_smps_mode smps;
292 int tries;
293 u16 capability;
294 u8 prev_bssid[ETH_ALEN];
295 u8 ssid[IEEE80211_MAX_SSID_LEN];
296 u8 ssid_len;
297 u8 supp_rates_len;
298 bool wmm_used, use_11n, uapsd_used;
299 } assoc;
300 struct {
301 u32 duration;
302 } remain;
303 };
253 304
305 int ie_len;
254 /* must be last */ 306 /* must be last */
255 u8 ie[0]; /* for auth or assoc frame, not probe */ 307 u8 ie[0];
256}; 308};
257 309
258/* flags used in struct ieee80211_if_managed.flags */ 310/* flags used in struct ieee80211_if_managed.flags */
@@ -260,15 +312,11 @@ enum ieee80211_sta_flags {
260 IEEE80211_STA_BEACON_POLL = BIT(0), 312 IEEE80211_STA_BEACON_POLL = BIT(0),
261 IEEE80211_STA_CONNECTION_POLL = BIT(1), 313 IEEE80211_STA_CONNECTION_POLL = BIT(1),
262 IEEE80211_STA_CONTROL_PORT = BIT(2), 314 IEEE80211_STA_CONTROL_PORT = BIT(2),
263 IEEE80211_STA_WMM_ENABLED = BIT(3),
264 IEEE80211_STA_DISABLE_11N = BIT(4), 315 IEEE80211_STA_DISABLE_11N = BIT(4),
265 IEEE80211_STA_CSA_RECEIVED = BIT(5), 316 IEEE80211_STA_CSA_RECEIVED = BIT(5),
266 IEEE80211_STA_MFP_ENABLED = BIT(6), 317 IEEE80211_STA_MFP_ENABLED = BIT(6),
267}; 318 IEEE80211_STA_UAPSD_ENABLED = BIT(7),
268 319 IEEE80211_STA_NULLFUNC_ACKED = BIT(8),
269/* flags for MLME request */
270enum ieee80211_sta_request {
271 IEEE80211_STA_REQ_SCAN,
272}; 320};
273 321
274struct ieee80211_if_managed { 322struct ieee80211_if_managed {
@@ -285,21 +333,18 @@ struct ieee80211_if_managed {
285 int probe_send_count; 333 int probe_send_count;
286 334
287 struct mutex mtx; 335 struct mutex mtx;
288 struct ieee80211_bss *associated; 336 struct cfg80211_bss *associated;
289 struct ieee80211_mgd_work *old_associate_work;
290 struct list_head work_list;
291 337
292 u8 bssid[ETH_ALEN]; 338 u8 bssid[ETH_ALEN];
293 339
294 u16 aid; 340 u16 aid;
295 u16 capab;
296 341
297 struct sk_buff_head skb_queue; 342 struct sk_buff_head skb_queue;
298 343
299 unsigned long timers_running; /* used for quiesce/restart */ 344 unsigned long timers_running; /* used for quiesce/restart */
300 bool powersave; /* powersave requested for this iface */ 345 bool powersave; /* powersave requested for this iface */
301 346 enum ieee80211_smps_mode req_smps, /* requested smps mode */
302 unsigned long request; 347 ap_smps; /* smps mode AP thinks we're in */
303 348
304 unsigned int flags; 349 unsigned int flags;
305 350
@@ -433,6 +478,8 @@ struct ieee80211_sub_if_data {
433 478
434 int drop_unencrypted; 479 int drop_unencrypted;
435 480
481 char name[IFNAMSIZ];
482
436 /* 483 /*
437 * keep track of whether the HT opmode (stored in 484 * keep track of whether the HT opmode (stored in
438 * vif.bss_info.ht_operation_mode) is valid. 485 * vif.bss_info.ht_operation_mode) is valid.
@@ -458,8 +505,8 @@ struct ieee80211_sub_if_data {
458 */ 505 */
459 struct ieee80211_if_ap *bss; 506 struct ieee80211_if_ap *bss;
460 507
461 int force_unicast_rateidx; /* forced TX rateidx for unicast frames */ 508 /* bitmap of allowed (non-MCS) rate indexes for rate control */
462 int max_ratectrl_rateidx; /* max TX rateidx for rate control */ 509 u32 rc_rateidx_mask[IEEE80211_NUM_BANDS];
463 510
464 union { 511 union {
465 struct ieee80211_if_ap ap; 512 struct ieee80211_if_ap ap;
@@ -565,6 +612,15 @@ struct ieee80211_local {
565 const struct ieee80211_ops *ops; 612 const struct ieee80211_ops *ops;
566 613
567 /* 614 /*
615 * work stuff, potentially off-channel (in the future)
616 */
617 struct mutex work_mtx;
618 struct list_head work_list;
619 struct timer_list work_timer;
620 struct work_struct work_work;
621 struct sk_buff_head work_skb_queue;
622
623 /*
568 * private workqueue to mac80211. mac80211 makes this accessible 624 * private workqueue to mac80211. mac80211 makes this accessible
569 * via ieee80211_queue_work() 625 * via ieee80211_queue_work()
570 */ 626 */
@@ -586,6 +642,9 @@ struct ieee80211_local {
586 /* used for uploading changed mc list */ 642 /* used for uploading changed mc list */
587 struct work_struct reconfig_filter; 643 struct work_struct reconfig_filter;
588 644
645 /* used to reconfigure hardware SM PS */
646 struct work_struct recalc_smps;
647
589 /* aggregated multicast list */ 648 /* aggregated multicast list */
590 struct dev_addr_list *mc_list; 649 struct dev_addr_list *mc_list;
591 int mc_count; 650 int mc_count;
@@ -630,15 +689,18 @@ struct ieee80211_local {
630 689
631 /* Station data */ 690 /* Station data */
632 /* 691 /*
633 * The lock only protects the list, hash, timer and counter 692 * The mutex only protects the list and counter,
634 * against manipulation, reads are done in RCU. Additionally, 693 * reads are done in RCU.
635 * the lock protects each BSS's TIM bitmap. 694 * Additionally, the lock protects the hash table,
695 * the pending list and each BSS's TIM bitmap.
636 */ 696 */
697 struct mutex sta_mtx;
637 spinlock_t sta_lock; 698 spinlock_t sta_lock;
638 unsigned long num_sta; 699 unsigned long num_sta;
639 struct list_head sta_list; 700 struct list_head sta_list, sta_pending_list;
640 struct sta_info *sta_hash[STA_HASH_SIZE]; 701 struct sta_info *sta_hash[STA_HASH_SIZE];
641 struct timer_list sta_cleanup; 702 struct timer_list sta_cleanup;
703 struct work_struct sta_finish_work;
642 int sta_generation; 704 int sta_generation;
643 705
644 struct sk_buff_head pending[IEEE80211_MAX_QUEUES]; 706 struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
@@ -689,6 +751,10 @@ struct ieee80211_local {
689 enum nl80211_channel_type oper_channel_type; 751 enum nl80211_channel_type oper_channel_type;
690 struct ieee80211_channel *oper_channel, *csa_channel; 752 struct ieee80211_channel *oper_channel, *csa_channel;
691 753
754 /* Temporary remain-on-channel for off-channel operations */
755 struct ieee80211_channel *tmp_channel;
756 enum nl80211_channel_type tmp_channel_type;
757
692 /* SNMP counters */ 758 /* SNMP counters */
693 /* dot11CountersTable */ 759 /* dot11CountersTable */
694 u32 dot11TransmittedFragmentCount; 760 u32 dot11TransmittedFragmentCount;
@@ -708,10 +774,6 @@ struct ieee80211_local {
708 assoc_led_name[32], radio_led_name[32]; 774 assoc_led_name[32], radio_led_name[32];
709#endif 775#endif
710 776
711#ifdef CONFIG_MAC80211_DEBUGFS
712 struct work_struct sta_debugfs_add;
713#endif
714
715#ifdef CONFIG_MAC80211_DEBUG_COUNTERS 777#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
716 /* TX/RX handler statistics */ 778 /* TX/RX handler statistics */
717 unsigned int tx_handlers_drop; 779 unsigned int tx_handlers_drop;
@@ -745,8 +807,22 @@ struct ieee80211_local {
745 int wifi_wme_noack_test; 807 int wifi_wme_noack_test;
746 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ 808 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
747 809
810 /*
811 * Bitmask of enabled u-apsd queues,
812 * IEEE80211_WMM_IE_STA_QOSINFO_AC_BE & co. Needs a new association
813 * to take effect.
814 */
815 unsigned int uapsd_queues;
816
817 /*
818 * Maximum number of buffered frames AP can deliver during a
819 * service period, IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL or similar.
820 * Needs a new association to take effect.
821 */
822 unsigned int uapsd_max_sp_len;
823
748 bool pspolling; 824 bool pspolling;
749 bool scan_ps_enabled; 825 bool offchannel_ps_enabled;
750 /* 826 /*
751 * PS can only be enabled when we have exactly one managed 827 * PS can only be enabled when we have exactly one managed
752 * interface (and monitors) in PS, this then points there. 828 * interface (and monitors) in PS, this then points there.
@@ -760,6 +836,8 @@ struct ieee80211_local {
760 int user_power_level; /* in dBm */ 836 int user_power_level; /* in dBm */
761 int power_constr_level; /* in dBm */ 837 int power_constr_level; /* in dBm */
762 838
839 enum ieee80211_smps_mode smps_mode;
840
763 struct work_struct restart_work; 841 struct work_struct restart_work;
764 842
765#ifdef CONFIG_MAC80211_DEBUGFS 843#ifdef CONFIG_MAC80211_DEBUGFS
@@ -874,6 +952,8 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
874void ieee80211_configure_filter(struct ieee80211_local *local); 952void ieee80211_configure_filter(struct ieee80211_local *local);
875u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata); 953u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
876 954
955extern bool ieee80211_disable_40mhz_24ghz;
956
877/* STA code */ 957/* STA code */
878void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata); 958void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata);
879int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, 959int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
@@ -886,6 +966,10 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
886int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, 966int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
887 struct cfg80211_disassoc_request *req, 967 struct cfg80211_disassoc_request *req,
888 void *cookie); 968 void *cookie);
969int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata,
970 struct ieee80211_channel *chan,
971 enum nl80211_channel_type channel_type,
972 const u8 *buf, size_t len, u64 *cookie);
889ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, 973ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
890 struct sk_buff *skb); 974 struct sk_buff *skb);
891void ieee80211_send_pspoll(struct ieee80211_local *local, 975void ieee80211_send_pspoll(struct ieee80211_local *local,
@@ -905,7 +989,8 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata);
905ieee80211_rx_result 989ieee80211_rx_result
906ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); 990ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
907struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, 991struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
908 u8 *bssid, u8 *addr, u32 supp_rates); 992 u8 *bssid, u8 *addr, u32 supp_rates,
993 gfp_t gfp);
909int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, 994int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
910 struct cfg80211_ibss_params *params); 995 struct cfg80211_ibss_params *params);
911int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata); 996int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata);
@@ -937,7 +1022,15 @@ ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
937void ieee80211_rx_bss_put(struct ieee80211_local *local, 1022void ieee80211_rx_bss_put(struct ieee80211_local *local,
938 struct ieee80211_bss *bss); 1023 struct ieee80211_bss *bss);
939 1024
1025/* off-channel helpers */
1026void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local);
1027void ieee80211_offchannel_stop_station(struct ieee80211_local *local);
1028void ieee80211_offchannel_return(struct ieee80211_local *local,
1029 bool enable_beaconing);
1030
940/* interface handling */ 1031/* interface handling */
1032int ieee80211_iface_init(void);
1033void ieee80211_iface_exit(void);
941int ieee80211_if_add(struct ieee80211_local *local, const char *name, 1034int ieee80211_if_add(struct ieee80211_local *local, const char *name,
942 struct net_device **new_dev, enum nl80211_iftype type, 1035 struct net_device **new_dev, enum nl80211_iftype type,
943 struct vif_params *params); 1036 struct vif_params *params);
@@ -948,6 +1041,11 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local);
948u32 __ieee80211_recalc_idle(struct ieee80211_local *local); 1041u32 __ieee80211_recalc_idle(struct ieee80211_local *local);
949void ieee80211_recalc_idle(struct ieee80211_local *local); 1042void ieee80211_recalc_idle(struct ieee80211_local *local);
950 1043
1044static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
1045{
1046 return netif_running(sdata->dev);
1047}
1048
951/* tx handling */ 1049/* tx handling */
952void ieee80211_clear_tx_pending(struct ieee80211_local *local); 1050void ieee80211_clear_tx_pending(struct ieee80211_local *local);
953void ieee80211_tx_pending(unsigned long data); 1051void ieee80211_tx_pending(unsigned long data);
@@ -976,6 +1074,9 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
976void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, 1074void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
977 const u8 *da, u16 tid, 1075 const u8 *da, u16 tid,
978 u16 initiator, u16 reason_code); 1076 u16 initiator, u16 reason_code);
1077int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
1078 enum ieee80211_smps_mode smps, const u8 *da,
1079 const u8 *bssid);
979 1080
980void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da, 1081void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da,
981 u16 tid, u16 initiator, u16 reason); 1082 u16 tid, u16 initiator, u16 reason);
@@ -1086,6 +1187,28 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
1086u32 ieee80211_sta_get_rates(struct ieee80211_local *local, 1187u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
1087 struct ieee802_11_elems *elems, 1188 struct ieee802_11_elems *elems,
1088 enum ieee80211_band band); 1189 enum ieee80211_band band);
1190int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
1191 enum ieee80211_smps_mode smps_mode);
1192void ieee80211_recalc_smps(struct ieee80211_local *local,
1193 struct ieee80211_sub_if_data *forsdata);
1194
1195size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
1196 const u8 *ids, int n_ids, size_t offset);
1197size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset);
1198
1199/* internal work items */
1200void ieee80211_work_init(struct ieee80211_local *local);
1201void ieee80211_add_work(struct ieee80211_work *wk);
1202void free_work(struct ieee80211_work *wk);
1203void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata);
1204ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
1205 struct sk_buff *skb);
1206int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
1207 struct ieee80211_channel *chan,
1208 enum nl80211_channel_type channel_type,
1209 unsigned int duration, u64 *cookie);
1210int ieee80211_wk_cancel_remain_on_channel(
1211 struct ieee80211_sub_if_data *sdata, u64 cookie);
1089 1212
1090#ifdef CONFIG_MAC80211_NOINLINE 1213#ifdef CONFIG_MAC80211_NOINLINE
1091#define debug_noinline noinline 1214#define debug_noinline noinline
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 80c16f6e2af6..0793d7a8d743 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -15,12 +15,14 @@
15#include <linux/netdevice.h> 15#include <linux/netdevice.h>
16#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
17#include <net/mac80211.h> 17#include <net/mac80211.h>
18#include <net/ieee80211_radiotap.h>
18#include "ieee80211_i.h" 19#include "ieee80211_i.h"
19#include "sta_info.h" 20#include "sta_info.h"
20#include "debugfs_netdev.h" 21#include "debugfs_netdev.h"
21#include "mesh.h" 22#include "mesh.h"
22#include "led.h" 23#include "led.h"
23#include "driver-ops.h" 24#include "driver-ops.h"
25#include "wme.h"
24 26
25/** 27/**
26 * DOC: Interface list locking 28 * DOC: Interface list locking
@@ -60,6 +62,23 @@ static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
60 return 0; 62 return 0;
61} 63}
62 64
65static int ieee80211_change_mac(struct net_device *dev, void *addr)
66{
67 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
68 struct sockaddr *sa = addr;
69 int ret;
70
71 if (ieee80211_sdata_running(sdata))
72 return -EBUSY;
73
74 ret = eth_mac_addr(dev, sa);
75
76 if (ret == 0)
77 memcpy(sdata->vif.addr, sa->sa_data, ETH_ALEN);
78
79 return ret;
80}
81
63static inline int identical_mac_addr_allowed(int type1, int type2) 82static inline int identical_mac_addr_allowed(int type1, int type2)
64{ 83{
65 return type1 == NL80211_IFTYPE_MONITOR || 84 return type1 == NL80211_IFTYPE_MONITOR ||
@@ -80,7 +99,6 @@ static int ieee80211_open(struct net_device *dev)
80 struct ieee80211_sub_if_data *nsdata; 99 struct ieee80211_sub_if_data *nsdata;
81 struct ieee80211_local *local = sdata->local; 100 struct ieee80211_local *local = sdata->local;
82 struct sta_info *sta; 101 struct sta_info *sta;
83 struct ieee80211_if_init_conf conf;
84 u32 changed = 0; 102 u32 changed = 0;
85 int res; 103 int res;
86 u32 hw_reconf_flags = 0; 104 u32 hw_reconf_flags = 0;
@@ -95,7 +113,7 @@ static int ieee80211_open(struct net_device *dev)
95 list_for_each_entry(nsdata, &local->interfaces, list) { 113 list_for_each_entry(nsdata, &local->interfaces, list) {
96 struct net_device *ndev = nsdata->dev; 114 struct net_device *ndev = nsdata->dev;
97 115
98 if (ndev != dev && netif_running(ndev)) { 116 if (ndev != dev && ieee80211_sdata_running(nsdata)) {
99 /* 117 /*
100 * Allow only a single IBSS interface to be up at any 118 * Allow only a single IBSS interface to be up at any
101 * time. This is restricted because beacon distribution 119 * time. This is restricted because beacon distribution
@@ -181,7 +199,7 @@ static int ieee80211_open(struct net_device *dev)
181 struct net_device *ndev = nsdata->dev; 199 struct net_device *ndev = nsdata->dev;
182 200
183 /* 201 /*
184 * No need to check netif_running since we do not allow 202 * No need to check running since we do not allow
185 * it to start up with this invalid address. 203 * it to start up with this invalid address.
186 */ 204 */
187 if (compare_ether_addr(null_addr, ndev->dev_addr) == 0) { 205 if (compare_ether_addr(null_addr, ndev->dev_addr) == 0) {
@@ -232,10 +250,7 @@ static int ieee80211_open(struct net_device *dev)
232 ieee80211_configure_filter(local); 250 ieee80211_configure_filter(local);
233 break; 251 break;
234 default: 252 default:
235 conf.vif = &sdata->vif; 253 res = drv_add_interface(local, &sdata->vif);
236 conf.type = sdata->vif.type;
237 conf.mac_addr = dev->dev_addr;
238 res = drv_add_interface(local, &conf);
239 if (res) 254 if (res)
240 goto err_stop; 255 goto err_stop;
241 256
@@ -314,11 +329,11 @@ static int ieee80211_open(struct net_device *dev)
314 if (sdata->vif.type == NL80211_IFTYPE_STATION) 329 if (sdata->vif.type == NL80211_IFTYPE_STATION)
315 ieee80211_queue_work(&local->hw, &sdata->u.mgd.work); 330 ieee80211_queue_work(&local->hw, &sdata->u.mgd.work);
316 331
317 netif_start_queue(dev); 332 netif_tx_start_all_queues(dev);
318 333
319 return 0; 334 return 0;
320 err_del_interface: 335 err_del_interface:
321 drv_remove_interface(local, &conf); 336 drv_remove_interface(local, &sdata->vif);
322 err_stop: 337 err_stop:
323 if (!local->open_count) 338 if (!local->open_count)
324 drv_stop(local); 339 drv_stop(local);
@@ -333,7 +348,6 @@ static int ieee80211_stop(struct net_device *dev)
333{ 348{
334 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 349 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
335 struct ieee80211_local *local = sdata->local; 350 struct ieee80211_local *local = sdata->local;
336 struct ieee80211_if_init_conf conf;
337 struct sta_info *sta; 351 struct sta_info *sta;
338 unsigned long flags; 352 unsigned long flags;
339 struct sk_buff *skb, *tmp; 353 struct sk_buff *skb, *tmp;
@@ -343,7 +357,12 @@ static int ieee80211_stop(struct net_device *dev)
343 /* 357 /*
344 * Stop TX on this interface first. 358 * Stop TX on this interface first.
345 */ 359 */
346 netif_stop_queue(dev); 360 netif_tx_stop_all_queues(dev);
361
362 /*
363 * Purge work for this interface.
364 */
365 ieee80211_work_purge(sdata);
347 366
348 /* 367 /*
349 * Now delete all active aggregation sessions. 368 * Now delete all active aggregation sessions.
@@ -512,12 +531,9 @@ static int ieee80211_stop(struct net_device *dev)
512 BSS_CHANGED_BEACON_ENABLED); 531 BSS_CHANGED_BEACON_ENABLED);
513 } 532 }
514 533
515 conf.vif = &sdata->vif;
516 conf.type = sdata->vif.type;
517 conf.mac_addr = dev->dev_addr;
518 /* disable all keys for as long as this netdev is down */ 534 /* disable all keys for as long as this netdev is down */
519 ieee80211_disable_keys(sdata); 535 ieee80211_disable_keys(sdata);
520 drv_remove_interface(local, &conf); 536 drv_remove_interface(local, &sdata->vif);
521 } 537 }
522 538
523 sdata->bss = NULL; 539 sdata->bss = NULL;
@@ -644,6 +660,12 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
644 WARN_ON(flushed); 660 WARN_ON(flushed);
645} 661}
646 662
663static u16 ieee80211_netdev_select_queue(struct net_device *dev,
664 struct sk_buff *skb)
665{
666 return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
667}
668
647static const struct net_device_ops ieee80211_dataif_ops = { 669static const struct net_device_ops ieee80211_dataif_ops = {
648 .ndo_open = ieee80211_open, 670 .ndo_open = ieee80211_open,
649 .ndo_stop = ieee80211_stop, 671 .ndo_stop = ieee80211_stop,
@@ -651,9 +673,43 @@ static const struct net_device_ops ieee80211_dataif_ops = {
651 .ndo_start_xmit = ieee80211_subif_start_xmit, 673 .ndo_start_xmit = ieee80211_subif_start_xmit,
652 .ndo_set_multicast_list = ieee80211_set_multicast_list, 674 .ndo_set_multicast_list = ieee80211_set_multicast_list,
653 .ndo_change_mtu = ieee80211_change_mtu, 675 .ndo_change_mtu = ieee80211_change_mtu,
654 .ndo_set_mac_address = eth_mac_addr, 676 .ndo_set_mac_address = ieee80211_change_mac,
677 .ndo_select_queue = ieee80211_netdev_select_queue,
655}; 678};
656 679
680static u16 ieee80211_monitor_select_queue(struct net_device *dev,
681 struct sk_buff *skb)
682{
683 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
684 struct ieee80211_local *local = sdata->local;
685 struct ieee80211_hdr *hdr;
686 struct ieee80211_radiotap_header *rtap = (void *)skb->data;
687 u8 *p;
688
689 if (local->hw.queues < 4)
690 return 0;
691
692 if (skb->len < 4 ||
693 skb->len < le16_to_cpu(rtap->it_len) + 2 /* frame control */)
694 return 0; /* doesn't matter, frame will be dropped */
695
696 hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len));
697
698 if (!ieee80211_is_data(hdr->frame_control)) {
699 skb->priority = 7;
700 return ieee802_1d_to_ac[skb->priority];
701 }
702 if (!ieee80211_is_data_qos(hdr->frame_control)) {
703 skb->priority = 0;
704 return ieee802_1d_to_ac[skb->priority];
705 }
706
707 p = ieee80211_get_qos_ctl(hdr);
708 skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
709
710 return ieee80211_downgrade_queue(local, skb);
711}
712
657static const struct net_device_ops ieee80211_monitorif_ops = { 713static const struct net_device_ops ieee80211_monitorif_ops = {
658 .ndo_open = ieee80211_open, 714 .ndo_open = ieee80211_open,
659 .ndo_stop = ieee80211_stop, 715 .ndo_stop = ieee80211_stop,
@@ -662,6 +718,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
662 .ndo_set_multicast_list = ieee80211_set_multicast_list, 718 .ndo_set_multicast_list = ieee80211_set_multicast_list,
663 .ndo_change_mtu = ieee80211_change_mtu, 719 .ndo_change_mtu = ieee80211_change_mtu,
664 .ndo_set_mac_address = eth_mac_addr, 720 .ndo_set_mac_address = eth_mac_addr,
721 .ndo_select_queue = ieee80211_monitor_select_queue,
665}; 722};
666 723
667static void ieee80211_if_setup(struct net_device *dev) 724static void ieee80211_if_setup(struct net_device *dev)
@@ -740,7 +797,7 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
740 * and goes into the requested mode. 797 * and goes into the requested mode.
741 */ 798 */
742 799
743 if (netif_running(sdata->dev)) 800 if (ieee80211_sdata_running(sdata))
744 return -EBUSY; 801 return -EBUSY;
745 802
746 /* Purge and reset type-dependent state. */ 803 /* Purge and reset type-dependent state. */
@@ -768,8 +825,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
768 825
769 ASSERT_RTNL(); 826 ASSERT_RTNL();
770 827
771 ndev = alloc_netdev(sizeof(*sdata) + local->hw.vif_data_size, 828 ndev = alloc_netdev_mq(sizeof(*sdata) + local->hw.vif_data_size,
772 name, ieee80211_if_setup); 829 name, ieee80211_if_setup, local->hw.queues);
773 if (!ndev) 830 if (!ndev)
774 return -ENOMEM; 831 return -ENOMEM;
775 dev_net_set(ndev, wiphy_net(local->hw.wiphy)); 832 dev_net_set(ndev, wiphy_net(local->hw.wiphy));
@@ -794,6 +851,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
794 /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ 851 /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */
795 sdata = netdev_priv(ndev); 852 sdata = netdev_priv(ndev);
796 ndev->ieee80211_ptr = &sdata->wdev; 853 ndev->ieee80211_ptr = &sdata->wdev;
854 memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
855 memcpy(sdata->name, ndev->name, IFNAMSIZ);
797 856
798 /* initialise type-independent data */ 857 /* initialise type-independent data */
799 sdata->wdev.wiphy = local->hw.wiphy; 858 sdata->wdev.wiphy = local->hw.wiphy;
@@ -805,8 +864,12 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
805 864
806 INIT_LIST_HEAD(&sdata->key_list); 865 INIT_LIST_HEAD(&sdata->key_list);
807 866
808 sdata->force_unicast_rateidx = -1; 867 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
809 sdata->max_ratectrl_rateidx = -1; 868 struct ieee80211_supported_band *sband;
869 sband = local->hw.wiphy->bands[i];
870 sdata->rc_rateidx_mask[i] =
871 sband ? (1 << sband->n_bitrates) - 1 : 0;
872 }
810 873
811 /* setup type-dependent data */ 874 /* setup type-dependent data */
812 ieee80211_setup_sdata(sdata, type); 875 ieee80211_setup_sdata(sdata, type);
@@ -899,6 +962,8 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local)
899 wiphy_name(local->hw.wiphy)); 962 wiphy_name(local->hw.wiphy));
900#endif 963#endif
901 964
965 drv_flush(local, false);
966
902 local->hw.conf.flags |= IEEE80211_CONF_IDLE; 967 local->hw.conf.flags |= IEEE80211_CONF_IDLE;
903 return IEEE80211_CONF_CHANGE_IDLE; 968 return IEEE80211_CONF_CHANGE_IDLE;
904} 969}
@@ -908,16 +973,18 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
908 struct ieee80211_sub_if_data *sdata; 973 struct ieee80211_sub_if_data *sdata;
909 int count = 0; 974 int count = 0;
910 975
976 if (!list_empty(&local->work_list))
977 return ieee80211_idle_off(local, "working");
978
911 if (local->scanning) 979 if (local->scanning)
912 return ieee80211_idle_off(local, "scanning"); 980 return ieee80211_idle_off(local, "scanning");
913 981
914 list_for_each_entry(sdata, &local->interfaces, list) { 982 list_for_each_entry(sdata, &local->interfaces, list) {
915 if (!netif_running(sdata->dev)) 983 if (!ieee80211_sdata_running(sdata))
916 continue; 984 continue;
917 /* do not count disabled managed interfaces */ 985 /* do not count disabled managed interfaces */
918 if (sdata->vif.type == NL80211_IFTYPE_STATION && 986 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
919 !sdata->u.mgd.associated && 987 !sdata->u.mgd.associated)
920 list_empty(&sdata->u.mgd.work_list))
921 continue; 988 continue;
922 /* do not count unused IBSS interfaces */ 989 /* do not count unused IBSS interfaces */
923 if (sdata->vif.type == NL80211_IFTYPE_ADHOC && 990 if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
@@ -945,3 +1012,41 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)
945 if (chg) 1012 if (chg)
946 ieee80211_hw_config(local, chg); 1013 ieee80211_hw_config(local, chg);
947} 1014}
1015
1016static int netdev_notify(struct notifier_block *nb,
1017 unsigned long state,
1018 void *ndev)
1019{
1020 struct net_device *dev = ndev;
1021 struct ieee80211_sub_if_data *sdata;
1022
1023 if (state != NETDEV_CHANGENAME)
1024 return 0;
1025
1026 if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
1027 return 0;
1028
1029 if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
1030 return 0;
1031
1032 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1033
1034 memcpy(sdata->name, dev->name, IFNAMSIZ);
1035
1036 ieee80211_debugfs_rename_netdev(sdata);
1037 return 0;
1038}
1039
1040static struct notifier_block mac80211_netdev_notifier = {
1041 .notifier_call = netdev_notify,
1042};
1043
1044int ieee80211_iface_init(void)
1045{
1046 return register_netdevice_notifier(&mac80211_netdev_notifier);
1047}
1048
1049void ieee80211_iface_exit(void)
1050{
1051 unregister_netdevice_notifier(&mac80211_netdev_notifier);
1052}
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 659a42d529e3..8160d9c5372e 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -139,7 +139,7 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
139 struct ieee80211_sub_if_data, 139 struct ieee80211_sub_if_data,
140 u.ap); 140 u.ap);
141 141
142 ret = drv_set_key(key->local, SET_KEY, &sdata->vif, sta, &key->conf); 142 ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf);
143 143
144 if (!ret) { 144 if (!ret) {
145 spin_lock_bh(&todo_lock); 145 spin_lock_bh(&todo_lock);
@@ -181,7 +181,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
181 struct ieee80211_sub_if_data, 181 struct ieee80211_sub_if_data,
182 u.ap); 182 u.ap);
183 183
184 ret = drv_set_key(key->local, DISABLE_KEY, &sdata->vif, 184 ret = drv_set_key(key->local, DISABLE_KEY, sdata,
185 sta, &key->conf); 185 sta, &key->conf);
186 186
187 if (ret) 187 if (ret)
@@ -421,7 +421,7 @@ void ieee80211_key_link(struct ieee80211_key *key,
421 */ 421 */
422 422
423 /* same here, the AP could be using QoS */ 423 /* same here, the AP could be using QoS */
424 ap = sta_info_get(key->local, key->sdata->u.mgd.bssid); 424 ap = sta_info_get(key->sdata, key->sdata->u.mgd.bssid);
425 if (ap) { 425 if (ap) {
426 if (test_sta_flags(ap, WLAN_STA_WME)) 426 if (test_sta_flags(ap, WLAN_STA_WME))
427 key->conf.flags |= 427 key->conf.flags |=
@@ -443,7 +443,7 @@ void ieee80211_key_link(struct ieee80211_key *key,
443 add_todo(old_key, KEY_FLAG_TODO_DELETE); 443 add_todo(old_key, KEY_FLAG_TODO_DELETE);
444 444
445 add_todo(key, KEY_FLAG_TODO_ADD_DEBUGFS); 445 add_todo(key, KEY_FLAG_TODO_ADD_DEBUGFS);
446 if (netif_running(sdata->dev)) 446 if (ieee80211_sdata_running(sdata))
447 add_todo(key, KEY_FLAG_TODO_HWACCEL_ADD); 447 add_todo(key, KEY_FLAG_TODO_HWACCEL_ADD);
448 448
449 spin_unlock_irqrestore(&sdata->local->key_lock, flags); 449 spin_unlock_irqrestore(&sdata->local->key_lock, flags);
@@ -509,7 +509,7 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
509{ 509{
510 ASSERT_RTNL(); 510 ASSERT_RTNL();
511 511
512 if (WARN_ON(!netif_running(sdata->dev))) 512 if (WARN_ON(!ieee80211_sdata_running(sdata)))
513 return; 513 return;
514 514
515 ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_ADD); 515 ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_ADD);
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index a49f93b79e92..bdc2968c2bbe 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -59,11 +59,17 @@ enum ieee80211_internal_key_flags {
59 KEY_FLAG_TODO_DEFMGMTKEY = BIT(6), 59 KEY_FLAG_TODO_DEFMGMTKEY = BIT(6),
60}; 60};
61 61
62enum ieee80211_internal_tkip_state {
63 TKIP_STATE_NOT_INIT,
64 TKIP_STATE_PHASE1_DONE,
65 TKIP_STATE_PHASE1_HW_UPLOADED,
66};
67
62struct tkip_ctx { 68struct tkip_ctx {
63 u32 iv32; 69 u32 iv32;
64 u16 iv16; 70 u16 iv16;
65 u16 p1k[5]; 71 u16 p1k[5];
66 int initialized; 72 enum ieee80211_internal_tkip_state state;
67}; 73};
68 74
69struct ieee80211_key { 75struct ieee80211_key {
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 8116d1a96a4a..06c33b68d8e5 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -17,7 +17,6 @@
17#include <linux/skbuff.h> 17#include <linux/skbuff.h>
18#include <linux/etherdevice.h> 18#include <linux/etherdevice.h>
19#include <linux/if_arp.h> 19#include <linux/if_arp.h>
20#include <linux/wireless.h>
21#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
22#include <linux/bitmap.h> 21#include <linux/bitmap.h>
23#include <linux/pm_qos_params.h> 22#include <linux/pm_qos_params.h>
@@ -32,7 +31,12 @@
32#include "led.h" 31#include "led.h"
33#include "cfg.h" 32#include "cfg.h"
34#include "debugfs.h" 33#include "debugfs.h"
35#include "debugfs_netdev.h" 34
35
36bool ieee80211_disable_40mhz_24ghz;
37module_param(ieee80211_disable_40mhz_24ghz, bool, 0644);
38MODULE_PARM_DESC(ieee80211_disable_40mhz_24ghz,
39 "Disable 40MHz support in the 2.4GHz band");
36 40
37void ieee80211_configure_filter(struct ieee80211_local *local) 41void ieee80211_configure_filter(struct ieee80211_local *local)
38{ 42{
@@ -102,6 +106,9 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
102 if (scan_chan) { 106 if (scan_chan) {
103 chan = scan_chan; 107 chan = scan_chan;
104 channel_type = NL80211_CHAN_NO_HT; 108 channel_type = NL80211_CHAN_NO_HT;
109 } else if (local->tmp_channel) {
110 chan = scan_chan = local->tmp_channel;
111 channel_type = local->tmp_channel_type;
105 } else { 112 } else {
106 chan = local->oper_channel; 113 chan = local->oper_channel;
107 channel_type = local->oper_channel_type; 114 channel_type = local->oper_channel_type;
@@ -114,6 +121,18 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
114 changed |= IEEE80211_CONF_CHANGE_CHANNEL; 121 changed |= IEEE80211_CONF_CHANGE_CHANNEL;
115 } 122 }
116 123
124 if (!conf_is_ht(&local->hw.conf)) {
125 /*
126 * mac80211.h documents that this is only valid
127 * when the channel is set to an HT type, and
128 * that otherwise STATIC is used.
129 */
130 local->hw.conf.smps_mode = IEEE80211_SMPS_STATIC;
131 } else if (local->hw.conf.smps_mode != local->smps_mode) {
132 local->hw.conf.smps_mode = local->smps_mode;
133 changed |= IEEE80211_CONF_CHANGE_SMPS;
134 }
135
117 if (scan_chan) 136 if (scan_chan)
118 power = chan->max_power; 137 power = chan->max_power;
119 else 138 else
@@ -173,7 +192,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
173 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 192 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
174 sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid; 193 sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid;
175 else if (sdata->vif.type == NL80211_IFTYPE_AP) 194 else if (sdata->vif.type == NL80211_IFTYPE_AP)
176 sdata->vif.bss_conf.bssid = sdata->dev->dev_addr; 195 sdata->vif.bss_conf.bssid = sdata->vif.addr;
177 else if (ieee80211_vif_is_mesh(&sdata->vif)) { 196 else if (ieee80211_vif_is_mesh(&sdata->vif)) {
178 sdata->vif.bss_conf.bssid = zero; 197 sdata->vif.bss_conf.bssid = zero;
179 } else { 198 } else {
@@ -195,7 +214,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
195 } 214 }
196 215
197 if (changed & BSS_CHANGED_BEACON_ENABLED) { 216 if (changed & BSS_CHANGED_BEACON_ENABLED) {
198 if (local->quiescing || !netif_running(sdata->dev) || 217 if (local->quiescing || !ieee80211_sdata_running(sdata) ||
199 test_bit(SCAN_SW_SCANNING, &local->scanning)) { 218 test_bit(SCAN_SW_SCANNING, &local->scanning)) {
200 sdata->vif.bss_conf.enable_beacon = false; 219 sdata->vif.bss_conf.enable_beacon = false;
201 } else { 220 } else {
@@ -223,8 +242,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
223 } 242 }
224 } 243 }
225 244
226 drv_bss_info_changed(local, &sdata->vif, 245 drv_bss_info_changed(local, sdata, &sdata->vif.bss_conf, changed);
227 &sdata->vif.bss_conf, changed);
228} 246}
229 247
230u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) 248u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
@@ -299,6 +317,16 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
299} 317}
300EXPORT_SYMBOL(ieee80211_restart_hw); 318EXPORT_SYMBOL(ieee80211_restart_hw);
301 319
320static void ieee80211_recalc_smps_work(struct work_struct *work)
321{
322 struct ieee80211_local *local =
323 container_of(work, struct ieee80211_local, recalc_smps);
324
325 mutex_lock(&local->iflist_mtx);
326 ieee80211_recalc_smps(local, NULL);
327 mutex_unlock(&local->iflist_mtx);
328}
329
302struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, 330struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
303 const struct ieee80211_ops *ops) 331 const struct ieee80211_ops *ops)
304{ 332{
@@ -333,9 +361,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
333 WIPHY_FLAG_4ADDR_STATION; 361 WIPHY_FLAG_4ADDR_STATION;
334 wiphy->privid = mac80211_wiphy_privid; 362 wiphy->privid = mac80211_wiphy_privid;
335 363
336 /* Yes, putting cfg80211_bss into ieee80211_bss is a hack */ 364 wiphy->bss_priv_size = sizeof(struct ieee80211_bss);
337 wiphy->bss_priv_size = sizeof(struct ieee80211_bss) -
338 sizeof(struct cfg80211_bss);
339 365
340 local = wiphy_priv(wiphy); 366 local = wiphy_priv(wiphy);
341 367
@@ -358,6 +384,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
358 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; 384 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
359 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; 385 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
360 local->user_power_level = -1; 386 local->user_power_level = -1;
387 local->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES;
388 local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;
361 389
362 INIT_LIST_HEAD(&local->interfaces); 390 INIT_LIST_HEAD(&local->interfaces);
363 mutex_init(&local->iflist_mtx); 391 mutex_init(&local->iflist_mtx);
@@ -369,9 +397,13 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
369 397
370 INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work); 398 INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
371 399
400 ieee80211_work_init(local);
401
372 INIT_WORK(&local->restart_work, ieee80211_restart_work); 402 INIT_WORK(&local->restart_work, ieee80211_restart_work);
373 403
374 INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter); 404 INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
405 INIT_WORK(&local->recalc_smps, ieee80211_recalc_smps_work);
406 local->smps_mode = IEEE80211_SMPS_OFF;
375 407
376 INIT_WORK(&local->dynamic_ps_enable_work, 408 INIT_WORK(&local->dynamic_ps_enable_work,
377 ieee80211_dynamic_ps_enable_work); 409 ieee80211_dynamic_ps_enable_work);
@@ -461,6 +493,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
461 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) 493 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
462 local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; 494 local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
463 495
496 WARN((local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
497 && (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK),
498 "U-APSD not supported with HW_PS_NULLFUNC_STACK\n");
499
464 /* 500 /*
465 * Calculate scan IE length -- we need this to alloc 501 * Calculate scan IE length -- we need this to alloc
466 * memory and to subtract from the driver limit. It 502 * memory and to subtract from the driver limit. It
@@ -515,13 +551,19 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
515 * and we need some headroom for passing the frame to monitor 551 * and we need some headroom for passing the frame to monitor
516 * interfaces, but never both at the same time. 552 * interfaces, but never both at the same time.
517 */ 553 */
554 BUILD_BUG_ON(IEEE80211_TX_STATUS_HEADROOM !=
555 sizeof(struct ieee80211_tx_status_rtap_hdr));
518 local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom, 556 local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom,
519 sizeof(struct ieee80211_tx_status_rtap_hdr)); 557 sizeof(struct ieee80211_tx_status_rtap_hdr));
520 558
521 debugfs_hw_add(local); 559 debugfs_hw_add(local);
522 560
561 /*
562 * if the driver doesn't specify a max listen interval we
563 * use 5 which should be a safe default
564 */
523 if (local->hw.max_listen_interval == 0) 565 if (local->hw.max_listen_interval == 0)
524 local->hw.max_listen_interval = 1; 566 local->hw.max_listen_interval = 5;
525 567
526 local->hw.conf.listen_interval = local->hw.max_listen_interval; 568 local->hw.conf.listen_interval = local->hw.max_listen_interval;
527 569
@@ -672,11 +714,19 @@ static int __init ieee80211_init(void)
672 714
673 ret = rc80211_pid_init(); 715 ret = rc80211_pid_init();
674 if (ret) 716 if (ret)
675 return ret; 717 goto err_pid;
676 718
677 ieee80211_debugfs_netdev_init(); 719 ret = ieee80211_iface_init();
720 if (ret)
721 goto err_netdev;
678 722
679 return 0; 723 return 0;
724 err_netdev:
725 rc80211_pid_exit();
726 err_pid:
727 rc80211_minstrel_exit();
728
729 return ret;
680} 730}
681 731
682static void __exit ieee80211_exit(void) 732static void __exit ieee80211_exit(void)
@@ -693,7 +743,7 @@ static void __exit ieee80211_exit(void)
693 if (mesh_allocated) 743 if (mesh_allocated)
694 ieee80211s_stop(); 744 ieee80211s_stop();
695 745
696 ieee80211_debugfs_netdev_exit(); 746 ieee80211_iface_exit();
697} 747}
698 748
699 749
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 6a4331429598..61080c5fad50 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -457,7 +457,7 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
457 457
458#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 458#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
459 printk(KERN_DEBUG "%s: running mesh housekeeping\n", 459 printk(KERN_DEBUG "%s: running mesh housekeeping\n",
460 sdata->dev->name); 460 sdata->name);
461#endif 461#endif
462 462
463 ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT); 463 ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
@@ -565,7 +565,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
565 565
566 /* ignore ProbeResp to foreign address */ 566 /* ignore ProbeResp to foreign address */
567 if (stype == IEEE80211_STYPE_PROBE_RESP && 567 if (stype == IEEE80211_STYPE_PROBE_RESP &&
568 compare_ether_addr(mgmt->da, sdata->dev->dev_addr)) 568 compare_ether_addr(mgmt->da, sdata->vif.addr))
569 return; 569 return;
570 570
571 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; 571 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -645,7 +645,7 @@ static void ieee80211_mesh_work(struct work_struct *work)
645 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 645 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
646 struct sk_buff *skb; 646 struct sk_buff *skb;
647 647
648 if (!netif_running(sdata->dev)) 648 if (!ieee80211_sdata_running(sdata))
649 return; 649 return;
650 650
651 if (local->scanning) 651 if (local->scanning)
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index d28acb6b1f81..ce84237ebad3 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -128,9 +128,9 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
128 IEEE80211_STYPE_ACTION); 128 IEEE80211_STYPE_ACTION);
129 129
130 memcpy(mgmt->da, da, ETH_ALEN); 130 memcpy(mgmt->da, da, ETH_ALEN);
131 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 131 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
132 /* BSSID == SA */ 132 /* BSSID == SA */
133 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); 133 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
134 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; 134 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
135 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; 135 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
136 136
@@ -222,7 +222,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
222 IEEE80211_STYPE_ACTION); 222 IEEE80211_STYPE_ACTION);
223 223
224 memcpy(mgmt->da, ra, ETH_ALEN); 224 memcpy(mgmt->da, ra, ETH_ALEN);
225 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 225 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
226 /* BSSID is left zeroed, wildcard value */ 226 /* BSSID is left zeroed, wildcard value */
227 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; 227 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
228 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; 228 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
@@ -335,7 +335,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
335 bool process = true; 335 bool process = true;
336 336
337 rcu_read_lock(); 337 rcu_read_lock();
338 sta = sta_info_get(local, mgmt->sa); 338 sta = sta_info_get(sdata, mgmt->sa);
339 if (!sta) { 339 if (!sta) {
340 rcu_read_unlock(); 340 rcu_read_unlock();
341 return 0; 341 return 0;
@@ -374,7 +374,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
374 new_metric = MAX_METRIC; 374 new_metric = MAX_METRIC;
375 exp_time = TU_TO_EXP_TIME(orig_lifetime); 375 exp_time = TU_TO_EXP_TIME(orig_lifetime);
376 376
377 if (memcmp(orig_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) { 377 if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0) {
378 /* This MP is the originator, we are not interested in this 378 /* This MP is the originator, we are not interested in this
379 * frame, except for updating transmitter's path info. 379 * frame, except for updating transmitter's path info.
380 */ 380 */
@@ -486,7 +486,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
486 486
487 mhwmp_dbg("received PREQ from %pM\n", orig_addr); 487 mhwmp_dbg("received PREQ from %pM\n", orig_addr);
488 488
489 if (memcmp(target_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) { 489 if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0) {
490 mhwmp_dbg("PREQ is for us\n"); 490 mhwmp_dbg("PREQ is for us\n");
491 forward = false; 491 forward = false;
492 reply = true; 492 reply = true;
@@ -579,7 +579,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
579 * replies 579 * replies
580 */ 580 */
581 target_addr = PREP_IE_TARGET_ADDR(prep_elem); 581 target_addr = PREP_IE_TARGET_ADDR(prep_elem);
582 if (memcmp(target_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) 582 if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0)
583 /* destination, no forwarding required */ 583 /* destination, no forwarding required */
584 return; 584 return;
585 585
@@ -890,7 +890,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
890 target_flags = MP_F_RF; 890 target_flags = MP_F_RF;
891 891
892 spin_unlock_bh(&mpath->state_lock); 892 spin_unlock_bh(&mpath->state_lock);
893 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->dev->dev_addr, 893 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr,
894 cpu_to_le32(ifmsh->sn), target_flags, mpath->dst, 894 cpu_to_le32(ifmsh->sn), target_flags, mpath->dst,
895 cpu_to_le32(mpath->sn), broadcast_addr, 0, 895 cpu_to_le32(mpath->sn), broadcast_addr, 0,
896 ttl, cpu_to_le32(lifetime), 0, 896 ttl, cpu_to_le32(lifetime), 0,
@@ -939,7 +939,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
939 if (time_after(jiffies, 939 if (time_after(jiffies,
940 mpath->exp_time - 940 mpath->exp_time -
941 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) && 941 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
942 !memcmp(sdata->dev->dev_addr, hdr->addr4, ETH_ALEN) && 942 !memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) &&
943 !(mpath->flags & MESH_PATH_RESOLVING) && 943 !(mpath->flags & MESH_PATH_RESOLVING) &&
944 !(mpath->flags & MESH_PATH_FIXED)) { 944 !(mpath->flags & MESH_PATH_FIXED)) {
945 mesh_queue_preq(mpath, 945 mesh_queue_preq(mpath,
@@ -1010,7 +1010,7 @@ mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
1010{ 1010{
1011 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 1011 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1012 1012
1013 mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->dev->dev_addr, 1013 mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->vif.addr,
1014 cpu_to_le32(++ifmsh->sn), 1014 cpu_to_le32(++ifmsh->sn),
1015 0, NULL, 0, broadcast_addr, 1015 0, NULL, 0, broadcast_addr,
1016 0, MESH_TTL, 0, 0, 0, sdata); 1016 0, MESH_TTL, 0, 0, 0, sdata);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 0192cfdacae4..2312efe04c62 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -260,7 +260,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
260 int err = 0; 260 int err = 0;
261 u32 hash_idx; 261 u32 hash_idx;
262 262
263 if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0) 263 if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
264 /* never add ourselves as neighbours */ 264 /* never add ourselves as neighbours */
265 return -ENOTSUPP; 265 return -ENOTSUPP;
266 266
@@ -377,7 +377,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
377 int err = 0; 377 int err = 0;
378 u32 hash_idx; 378 u32 hash_idx;
379 379
380 if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0) 380 if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
381 /* never add ourselves as neighbours */ 381 /* never add ourselves as neighbours */
382 return -ENOTSUPP; 382 return -ENOTSUPP;
383 383
@@ -605,7 +605,7 @@ void mesh_path_discard_frame(struct sk_buff *skb,
605 struct mesh_path *mpath; 605 struct mesh_path *mpath;
606 u32 sn = 0; 606 u32 sn = 0;
607 607
608 if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) { 608 if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
609 u8 *ra, *da; 609 u8 *ra, *da;
610 610
611 da = hdr->addr3; 611 da = hdr->addr3;
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 0f7c6e6a4248..1a29c4a8139e 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -102,7 +102,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
102 if (local->num_sta >= MESH_MAX_PLINKS) 102 if (local->num_sta >= MESH_MAX_PLINKS)
103 return NULL; 103 return NULL;
104 104
105 sta = sta_info_alloc(sdata, hw_addr, GFP_ATOMIC); 105 sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
106 if (!sta) 106 if (!sta)
107 return NULL; 107 return NULL;
108 108
@@ -169,7 +169,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
169 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 169 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
170 IEEE80211_STYPE_ACTION); 170 IEEE80211_STYPE_ACTION);
171 memcpy(mgmt->da, da, ETH_ALEN); 171 memcpy(mgmt->da, da, ETH_ALEN);
172 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 172 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
173 /* BSSID is left zeroed, wildcard value */ 173 /* BSSID is left zeroed, wildcard value */
174 mgmt->u.action.category = MESH_PLINK_CATEGORY; 174 mgmt->u.action.category = MESH_PLINK_CATEGORY;
175 mgmt->u.action.u.plink_action.action_code = action; 175 mgmt->u.action.u.plink_action.action_code = action;
@@ -234,14 +234,14 @@ void mesh_neighbour_update(u8 *hw_addr, u32 rates, struct ieee80211_sub_if_data
234 234
235 rcu_read_lock(); 235 rcu_read_lock();
236 236
237 sta = sta_info_get(local, hw_addr); 237 sta = sta_info_get(sdata, hw_addr);
238 if (!sta) { 238 if (!sta) {
239 rcu_read_unlock();
240
239 sta = mesh_plink_alloc(sdata, hw_addr, rates); 241 sta = mesh_plink_alloc(sdata, hw_addr, rates);
240 if (!sta) { 242 if (!sta)
241 rcu_read_unlock();
242 return; 243 return;
243 } 244 if (sta_info_insert_rcu(sta)) {
244 if (sta_info_insert(sta)) {
245 rcu_read_unlock(); 245 rcu_read_unlock();
246 return; 246 return;
247 } 247 }
@@ -455,7 +455,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
455 455
456 rcu_read_lock(); 456 rcu_read_lock();
457 457
458 sta = sta_info_get(local, mgmt->sa); 458 sta = sta_info_get(sdata, mgmt->sa);
459 if (!sta && ftype != PLINK_OPEN) { 459 if (!sta && ftype != PLINK_OPEN) {
460 mpl_dbg("Mesh plink: cls or cnf from unknown peer\n"); 460 mpl_dbg("Mesh plink: cls or cnf from unknown peer\n");
461 rcu_read_unlock(); 461 rcu_read_unlock();
@@ -485,9 +485,11 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
485 } else if (!sta) { 485 } else if (!sta) {
486 /* ftype == PLINK_OPEN */ 486 /* ftype == PLINK_OPEN */
487 u32 rates; 487 u32 rates;
488
489 rcu_read_unlock();
490
488 if (!mesh_plink_free_count(sdata)) { 491 if (!mesh_plink_free_count(sdata)) {
489 mpl_dbg("Mesh plink error: no more free plinks\n"); 492 mpl_dbg("Mesh plink error: no more free plinks\n");
490 rcu_read_unlock();
491 return; 493 return;
492 } 494 }
493 495
@@ -495,10 +497,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
495 sta = mesh_plink_alloc(sdata, mgmt->sa, rates); 497 sta = mesh_plink_alloc(sdata, mgmt->sa, rates);
496 if (!sta) { 498 if (!sta) {
497 mpl_dbg("Mesh plink error: plink table full\n"); 499 mpl_dbg("Mesh plink error: plink table full\n");
498 rcu_read_unlock();
499 return; 500 return;
500 } 501 }
501 if (sta_info_insert(sta)) { 502 if (sta_info_insert_rcu(sta)) {
502 rcu_read_unlock(); 503 rcu_read_unlock();
503 return; 504 return;
504 } 505 }
@@ -743,7 +744,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
743 break; 744 break;
744 default: 745 default:
745 /* should not get here, PLINK_BLOCKED is dealt with at the 746 /* should not get here, PLINK_BLOCKED is dealt with at the
746 * beggining of the function 747 * beginning of the function
747 */ 748 */
748 spin_unlock_bh(&sta->lock); 749 spin_unlock_bh(&sta->lock);
749 break; 750 break;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index d8d50fb5e823..be5f723d643a 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -27,10 +27,6 @@
27#include "rate.h" 27#include "rate.h"
28#include "led.h" 28#include "led.h"
29 29
30#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
31#define IEEE80211_AUTH_MAX_TRIES 3
32#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
33#define IEEE80211_ASSOC_MAX_TRIES 3
34#define IEEE80211_MAX_PROBE_TRIES 5 30#define IEEE80211_MAX_PROBE_TRIES 5
35 31
36/* 32/*
@@ -75,11 +71,8 @@ enum rx_mgmt_action {
75 /* caller must call cfg80211_send_disassoc() */ 71 /* caller must call cfg80211_send_disassoc() */
76 RX_MGMT_CFG80211_DISASSOC, 72 RX_MGMT_CFG80211_DISASSOC,
77 73
78 /* caller must call cfg80211_auth_timeout() & free work */ 74 /* caller must tell cfg80211 about internal error */
79 RX_MGMT_CFG80211_AUTH_TO, 75 RX_MGMT_CFG80211_ASSOC_ERROR,
80
81 /* caller must call cfg80211_assoc_timeout() & free work */
82 RX_MGMT_CFG80211_ASSOC_TO,
83}; 76};
84 77
85/* utils */ 78/* utils */
@@ -122,27 +115,6 @@ static int ecw2cw(int ecw)
122 return (1 << ecw) - 1; 115 return (1 << ecw) - 1;
123} 116}
124 117
125static int ieee80211_compatible_rates(struct ieee80211_bss *bss,
126 struct ieee80211_supported_band *sband,
127 u32 *rates)
128{
129 int i, j, count;
130 *rates = 0;
131 count = 0;
132 for (i = 0; i < bss->supp_rates_len; i++) {
133 int rate = (bss->supp_rates[i] & 0x7F) * 5;
134
135 for (j = 0; j < sband->n_bitrates; j++)
136 if (sband->bitrates[j].bitrate == rate) {
137 *rates |= BIT(j);
138 count++;
139 break;
140 }
141 }
142
143 return count;
144}
145
146/* 118/*
147 * ieee80211_enable_ht should be called only after the operating band 119 * ieee80211_enable_ht should be called only after the operating band
148 * has been determined as ht configuration depends on the hw's 120 * has been determined as ht configuration depends on the hw's
@@ -202,10 +174,11 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
202 ieee80211_hw_config(local, 0); 174 ieee80211_hw_config(local, 0);
203 175
204 rcu_read_lock(); 176 rcu_read_lock();
205 sta = sta_info_get(local, bssid); 177 sta = sta_info_get(sdata, bssid);
206 if (sta) 178 if (sta)
207 rate_control_rate_update(local, sband, sta, 179 rate_control_rate_update(local, sband, sta,
208 IEEE80211_RC_HT_CHANGED); 180 IEEE80211_RC_HT_CHANGED,
181 local->oper_channel_type);
209 rcu_read_unlock(); 182 rcu_read_unlock();
210 } 183 }
211 184
@@ -228,209 +201,6 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
228 201
229/* frame sending functions */ 202/* frame sending functions */
230 203
231static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
232 struct ieee80211_mgd_work *wk)
233{
234 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
235 struct ieee80211_local *local = sdata->local;
236 struct sk_buff *skb;
237 struct ieee80211_mgmt *mgmt;
238 u8 *pos;
239 const u8 *ies, *ht_ie;
240 int i, len, count, rates_len, supp_rates_len;
241 u16 capab;
242 int wmm = 0;
243 struct ieee80211_supported_band *sband;
244 u32 rates = 0;
245
246 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
247 sizeof(*mgmt) + 200 + wk->ie_len +
248 wk->ssid_len);
249 if (!skb) {
250 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
251 "frame\n", sdata->dev->name);
252 return;
253 }
254 skb_reserve(skb, local->hw.extra_tx_headroom);
255
256 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
257
258 capab = ifmgd->capab;
259
260 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) {
261 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
262 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
263 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
264 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
265 }
266
267 if (wk->bss->cbss.capability & WLAN_CAPABILITY_PRIVACY)
268 capab |= WLAN_CAPABILITY_PRIVACY;
269 if (wk->bss->wmm_used)
270 wmm = 1;
271
272 /* get all rates supported by the device and the AP as
273 * some APs don't like getting a superset of their rates
274 * in the association request (e.g. D-Link DAP 1353 in
275 * b-only mode) */
276 rates_len = ieee80211_compatible_rates(wk->bss, sband, &rates);
277
278 if ((wk->bss->cbss.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
279 (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
280 capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
281
282 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
283 memset(mgmt, 0, 24);
284 memcpy(mgmt->da, wk->bss->cbss.bssid, ETH_ALEN);
285 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
286 memcpy(mgmt->bssid, wk->bss->cbss.bssid, ETH_ALEN);
287
288 if (!is_zero_ether_addr(wk->prev_bssid)) {
289 skb_put(skb, 10);
290 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
291 IEEE80211_STYPE_REASSOC_REQ);
292 mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
293 mgmt->u.reassoc_req.listen_interval =
294 cpu_to_le16(local->hw.conf.listen_interval);
295 memcpy(mgmt->u.reassoc_req.current_ap, wk->prev_bssid,
296 ETH_ALEN);
297 } else {
298 skb_put(skb, 4);
299 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
300 IEEE80211_STYPE_ASSOC_REQ);
301 mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
302 mgmt->u.assoc_req.listen_interval =
303 cpu_to_le16(local->hw.conf.listen_interval);
304 }
305
306 /* SSID */
307 ies = pos = skb_put(skb, 2 + wk->ssid_len);
308 *pos++ = WLAN_EID_SSID;
309 *pos++ = wk->ssid_len;
310 memcpy(pos, wk->ssid, wk->ssid_len);
311
312 /* add all rates which were marked to be used above */
313 supp_rates_len = rates_len;
314 if (supp_rates_len > 8)
315 supp_rates_len = 8;
316
317 len = sband->n_bitrates;
318 pos = skb_put(skb, supp_rates_len + 2);
319 *pos++ = WLAN_EID_SUPP_RATES;
320 *pos++ = supp_rates_len;
321
322 count = 0;
323 for (i = 0; i < sband->n_bitrates; i++) {
324 if (BIT(i) & rates) {
325 int rate = sband->bitrates[i].bitrate;
326 *pos++ = (u8) (rate / 5);
327 if (++count == 8)
328 break;
329 }
330 }
331
332 if (rates_len > count) {
333 pos = skb_put(skb, rates_len - count + 2);
334 *pos++ = WLAN_EID_EXT_SUPP_RATES;
335 *pos++ = rates_len - count;
336
337 for (i++; i < sband->n_bitrates; i++) {
338 if (BIT(i) & rates) {
339 int rate = sband->bitrates[i].bitrate;
340 *pos++ = (u8) (rate / 5);
341 }
342 }
343 }
344
345 if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
346 /* 1. power capabilities */
347 pos = skb_put(skb, 4);
348 *pos++ = WLAN_EID_PWR_CAPABILITY;
349 *pos++ = 2;
350 *pos++ = 0; /* min tx power */
351 *pos++ = local->hw.conf.channel->max_power; /* max tx power */
352
353 /* 2. supported channels */
354 /* TODO: get this in reg domain format */
355 pos = skb_put(skb, 2 * sband->n_channels + 2);
356 *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
357 *pos++ = 2 * sband->n_channels;
358 for (i = 0; i < sband->n_channels; i++) {
359 *pos++ = ieee80211_frequency_to_channel(
360 sband->channels[i].center_freq);
361 *pos++ = 1; /* one channel in the subband*/
362 }
363 }
364
365 if (wk->ie_len && wk->ie) {
366 pos = skb_put(skb, wk->ie_len);
367 memcpy(pos, wk->ie, wk->ie_len);
368 }
369
370 if (wmm && (ifmgd->flags & IEEE80211_STA_WMM_ENABLED)) {
371 pos = skb_put(skb, 9);
372 *pos++ = WLAN_EID_VENDOR_SPECIFIC;
373 *pos++ = 7; /* len */
374 *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
375 *pos++ = 0x50;
376 *pos++ = 0xf2;
377 *pos++ = 2; /* WME */
378 *pos++ = 0; /* WME info */
379 *pos++ = 1; /* WME ver */
380 *pos++ = 0;
381 }
382
383 /* wmm support is a must to HT */
384 /*
385 * IEEE802.11n does not allow TKIP/WEP as pairwise
386 * ciphers in HT mode. We still associate in non-ht
387 * mode (11a/b/g) if any one of these ciphers is
388 * configured as pairwise.
389 */
390 if (wmm && (ifmgd->flags & IEEE80211_STA_WMM_ENABLED) &&
391 sband->ht_cap.ht_supported &&
392 (ht_ie = ieee80211_bss_get_ie(&wk->bss->cbss, WLAN_EID_HT_INFORMATION)) &&
393 ht_ie[1] >= sizeof(struct ieee80211_ht_info) &&
394 (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))) {
395 struct ieee80211_ht_info *ht_info =
396 (struct ieee80211_ht_info *)(ht_ie + 2);
397 u16 cap = sband->ht_cap.cap;
398 __le16 tmp;
399 u32 flags = local->hw.conf.channel->flags;
400
401 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
402 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
403 if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
404 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
405 cap &= ~IEEE80211_HT_CAP_SGI_40;
406 }
407 break;
408 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
409 if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
410 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
411 cap &= ~IEEE80211_HT_CAP_SGI_40;
412 }
413 break;
414 }
415
416 tmp = cpu_to_le16(cap);
417 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap)+2);
418 *pos++ = WLAN_EID_HT_CAPABILITY;
419 *pos++ = sizeof(struct ieee80211_ht_cap);
420 memset(pos, 0, sizeof(struct ieee80211_ht_cap));
421 memcpy(pos, &tmp, sizeof(u16));
422 pos += sizeof(u16);
423 /* TODO: needs a define here for << 2 */
424 *pos++ = sband->ht_cap.ampdu_factor |
425 (sband->ht_cap.ampdu_density << 2);
426 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
427 }
428
429 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
430 ieee80211_tx_skb(sdata, skb);
431}
432
433
434static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, 204static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
435 const u8 *bssid, u16 stype, u16 reason, 205 const u8 *bssid, u16 stype, u16 reason,
436 void *cookie) 206 void *cookie)
@@ -443,7 +213,7 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
443 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); 213 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt));
444 if (!skb) { 214 if (!skb) {
445 printk(KERN_DEBUG "%s: failed to allocate buffer for " 215 printk(KERN_DEBUG "%s: failed to allocate buffer for "
446 "deauth/disassoc frame\n", sdata->dev->name); 216 "deauth/disassoc frame\n", sdata->name);
447 return; 217 return;
448 } 218 }
449 skb_reserve(skb, local->hw.extra_tx_headroom); 219 skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -451,7 +221,7 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
451 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 221 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
452 memset(mgmt, 0, 24); 222 memset(mgmt, 0, 24);
453 memcpy(mgmt->da, bssid, ETH_ALEN); 223 memcpy(mgmt->da, bssid, ETH_ALEN);
454 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 224 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
455 memcpy(mgmt->bssid, bssid, ETH_ALEN); 225 memcpy(mgmt->bssid, bssid, ETH_ALEN);
456 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype); 226 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
457 skb_put(skb, 2); 227 skb_put(skb, 2);
@@ -476,30 +246,15 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
476void ieee80211_send_pspoll(struct ieee80211_local *local, 246void ieee80211_send_pspoll(struct ieee80211_local *local,
477 struct ieee80211_sub_if_data *sdata) 247 struct ieee80211_sub_if_data *sdata)
478{ 248{
479 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
480 struct ieee80211_pspoll *pspoll; 249 struct ieee80211_pspoll *pspoll;
481 struct sk_buff *skb; 250 struct sk_buff *skb;
482 u16 fc;
483 251
484 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll)); 252 skb = ieee80211_pspoll_get(&local->hw, &sdata->vif);
485 if (!skb) { 253 if (!skb)
486 printk(KERN_DEBUG "%s: failed to allocate buffer for "
487 "pspoll frame\n", sdata->dev->name);
488 return; 254 return;
489 }
490 skb_reserve(skb, local->hw.extra_tx_headroom);
491 255
492 pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll)); 256 pspoll = (struct ieee80211_pspoll *) skb->data;
493 memset(pspoll, 0, sizeof(*pspoll)); 257 pspoll->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
494 fc = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL | IEEE80211_FCTL_PM;
495 pspoll->frame_control = cpu_to_le16(fc);
496 pspoll->aid = cpu_to_le16(ifmgd->aid);
497
498 /* aid in PS-Poll has its two MSBs each set to 1 */
499 pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);
500
501 memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
502 memcpy(pspoll->ta, sdata->dev->dev_addr, ETH_ALEN);
503 258
504 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 259 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
505 ieee80211_tx_skb(sdata, skb); 260 ieee80211_tx_skb(sdata, skb);
@@ -510,30 +265,47 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
510 int powersave) 265 int powersave)
511{ 266{
512 struct sk_buff *skb; 267 struct sk_buff *skb;
268 struct ieee80211_hdr_3addr *nullfunc;
269
270 skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif);
271 if (!skb)
272 return;
273
274 nullfunc = (struct ieee80211_hdr_3addr *) skb->data;
275 if (powersave)
276 nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
277
278 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
279 ieee80211_tx_skb(sdata, skb);
280}
281
282static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
283 struct ieee80211_sub_if_data *sdata)
284{
285 struct sk_buff *skb;
513 struct ieee80211_hdr *nullfunc; 286 struct ieee80211_hdr *nullfunc;
514 __le16 fc; 287 __le16 fc;
515 288
516 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) 289 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
517 return; 290 return;
518 291
519 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24); 292 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30);
520 if (!skb) { 293 if (!skb) {
521 printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc " 294 printk(KERN_DEBUG "%s: failed to allocate buffer for 4addr "
522 "frame\n", sdata->dev->name); 295 "nullfunc frame\n", sdata->name);
523 return; 296 return;
524 } 297 }
525 skb_reserve(skb, local->hw.extra_tx_headroom); 298 skb_reserve(skb, local->hw.extra_tx_headroom);
526 299
527 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24); 300 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 30);
528 memset(nullfunc, 0, 24); 301 memset(nullfunc, 0, 30);
529 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | 302 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
530 IEEE80211_FCTL_TODS); 303 IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
531 if (powersave)
532 fc |= cpu_to_le16(IEEE80211_FCTL_PM);
533 nullfunc->frame_control = fc; 304 nullfunc->frame_control = fc;
534 memcpy(nullfunc->addr1, sdata->u.mgd.bssid, ETH_ALEN); 305 memcpy(nullfunc->addr1, sdata->u.mgd.bssid, ETH_ALEN);
535 memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN); 306 memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
536 memcpy(nullfunc->addr3, sdata->u.mgd.bssid, ETH_ALEN); 307 memcpy(nullfunc->addr3, sdata->u.mgd.bssid, ETH_ALEN);
308 memcpy(nullfunc->addr4, sdata->vif.addr, ETH_ALEN);
537 309
538 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 310 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
539 ieee80211_tx_skb(sdata, skb); 311 ieee80211_tx_skb(sdata, skb);
@@ -546,7 +318,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
546 container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work); 318 container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work);
547 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 319 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
548 320
549 if (!netif_running(sdata->dev)) 321 if (!ieee80211_sdata_running(sdata))
550 return; 322 return;
551 323
552 mutex_lock(&ifmgd->mtx); 324 mutex_lock(&ifmgd->mtx);
@@ -557,7 +329,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
557 ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL); 329 ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL);
558 330
559 /* XXX: shouldn't really modify cfg80211-owned data! */ 331 /* XXX: shouldn't really modify cfg80211-owned data! */
560 ifmgd->associated->cbss.channel = sdata->local->oper_channel; 332 ifmgd->associated->channel = sdata->local->oper_channel;
561 333
562 ieee80211_wake_queues_by_reason(&sdata->local->hw, 334 ieee80211_wake_queues_by_reason(&sdata->local->hw,
563 IEEE80211_QUEUE_STOP_REASON_CSA); 335 IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -584,6 +356,8 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
584 struct ieee80211_channel_sw_ie *sw_elem, 356 struct ieee80211_channel_sw_ie *sw_elem,
585 struct ieee80211_bss *bss) 357 struct ieee80211_bss *bss)
586{ 358{
359 struct cfg80211_bss *cbss =
360 container_of((void *)bss, struct cfg80211_bss, priv);
587 struct ieee80211_channel *new_ch; 361 struct ieee80211_channel *new_ch;
588 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 362 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
589 int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num); 363 int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num);
@@ -617,7 +391,7 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
617 mod_timer(&ifmgd->chswitch_timer, 391 mod_timer(&ifmgd->chswitch_timer,
618 jiffies + 392 jiffies +
619 msecs_to_jiffies(sw_elem->count * 393 msecs_to_jiffies(sw_elem->count *
620 bss->cbss.beacon_interval)); 394 cbss->beacon_interval));
621 } 395 }
622} 396}
623 397
@@ -661,6 +435,11 @@ static void ieee80211_enable_ps(struct ieee80211_local *local,
661 } else { 435 } else {
662 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) 436 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
663 ieee80211_send_nullfunc(local, sdata, 1); 437 ieee80211_send_nullfunc(local, sdata, 1);
438
439 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
440 (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS))
441 return;
442
664 conf->flags |= IEEE80211_CONF_PS; 443 conf->flags |= IEEE80211_CONF_PS;
665 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); 444 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
666 } 445 }
@@ -691,8 +470,13 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
691 return; 470 return;
692 } 471 }
693 472
473 if (!list_empty(&local->work_list)) {
474 local->ps_sdata = NULL;
475 goto change;
476 }
477
694 list_for_each_entry(sdata, &local->interfaces, list) { 478 list_for_each_entry(sdata, &local->interfaces, list) {
695 if (!netif_running(sdata->dev)) 479 if (!ieee80211_sdata_running(sdata))
696 continue; 480 continue;
697 if (sdata->vif.type != NL80211_IFTYPE_STATION) 481 if (sdata->vif.type != NL80211_IFTYPE_STATION)
698 continue; 482 continue;
@@ -701,7 +485,8 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
701 } 485 }
702 486
703 if (count == 1 && found->u.mgd.powersave && 487 if (count == 1 && found->u.mgd.powersave &&
704 found->u.mgd.associated && list_empty(&found->u.mgd.work_list) && 488 found->u.mgd.associated &&
489 found->u.mgd.associated->beacon_ies &&
705 !(found->u.mgd.flags & (IEEE80211_STA_BEACON_POLL | 490 !(found->u.mgd.flags & (IEEE80211_STA_BEACON_POLL |
706 IEEE80211_STA_CONNECTION_POLL))) { 491 IEEE80211_STA_CONNECTION_POLL))) {
707 s32 beaconint_us; 492 s32 beaconint_us;
@@ -715,20 +500,29 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
715 if (beaconint_us > latency) { 500 if (beaconint_us > latency) {
716 local->ps_sdata = NULL; 501 local->ps_sdata = NULL;
717 } else { 502 } else {
718 u8 dtimper = found->vif.bss_conf.dtim_period; 503 struct ieee80211_bss *bss;
719 int maxslp = 1; 504 int maxslp = 1;
505 u8 dtimper;
506
507 bss = (void *)found->u.mgd.associated->priv;
508 dtimper = bss->dtim_period;
720 509
721 if (dtimper > 1) 510 /* If the TIM IE is invalid, pretend the value is 1 */
511 if (!dtimper)
512 dtimper = 1;
513 else if (dtimper > 1)
722 maxslp = min_t(int, dtimper, 514 maxslp = min_t(int, dtimper,
723 latency / beaconint_us); 515 latency / beaconint_us);
724 516
725 local->hw.conf.max_sleep_period = maxslp; 517 local->hw.conf.max_sleep_period = maxslp;
518 local->hw.conf.ps_dtim_period = dtimper;
726 local->ps_sdata = found; 519 local->ps_sdata = found;
727 } 520 }
728 } else { 521 } else {
729 local->ps_sdata = NULL; 522 local->ps_sdata = NULL;
730 } 523 }
731 524
525 change:
732 ieee80211_change_ps(local); 526 ieee80211_change_ps(local);
733} 527}
734 528
@@ -753,6 +547,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
753 container_of(work, struct ieee80211_local, 547 container_of(work, struct ieee80211_local,
754 dynamic_ps_enable_work); 548 dynamic_ps_enable_work);
755 struct ieee80211_sub_if_data *sdata = local->ps_sdata; 549 struct ieee80211_sub_if_data *sdata = local->ps_sdata;
550 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
756 551
757 /* can only happen when PS was just disabled anyway */ 552 /* can only happen when PS was just disabled anyway */
758 if (!sdata) 553 if (!sdata)
@@ -761,11 +556,17 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
761 if (local->hw.conf.flags & IEEE80211_CONF_PS) 556 if (local->hw.conf.flags & IEEE80211_CONF_PS)
762 return; 557 return;
763 558
764 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) 559 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
560 (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)))
765 ieee80211_send_nullfunc(local, sdata, 1); 561 ieee80211_send_nullfunc(local, sdata, 1);
766 562
767 local->hw.conf.flags |= IEEE80211_CONF_PS; 563 if (!((local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
768 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); 564 (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) ||
565 (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
566 ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
567 local->hw.conf.flags |= IEEE80211_CONF_PS;
568 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
569 }
769} 570}
770 571
771void ieee80211_dynamic_ps_timer(unsigned long data) 572void ieee80211_dynamic_ps_timer(unsigned long data)
@@ -786,9 +587,9 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
786 struct ieee80211_tx_queue_params params; 587 struct ieee80211_tx_queue_params params;
787 size_t left; 588 size_t left;
788 int count; 589 int count;
789 u8 *pos; 590 u8 *pos, uapsd_queues = 0;
790 591
791 if (!(ifmgd->flags & IEEE80211_STA_WMM_ENABLED)) 592 if (local->hw.queues < 4)
792 return; 593 return;
793 594
794 if (!wmm_param) 595 if (!wmm_param)
@@ -796,6 +597,10 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
796 597
797 if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1) 598 if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
798 return; 599 return;
600
601 if (ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
602 uapsd_queues = local->uapsd_queues;
603
799 count = wmm_param[6] & 0x0f; 604 count = wmm_param[6] & 0x0f;
800 if (count == ifmgd->wmm_last_param_set) 605 if (count == ifmgd->wmm_last_param_set)
801 return; 606 return;
@@ -810,6 +615,7 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
810 for (; left >= 4; left -= 4, pos += 4) { 615 for (; left >= 4; left -= 4, pos += 4) {
811 int aci = (pos[0] >> 5) & 0x03; 616 int aci = (pos[0] >> 5) & 0x03;
812 int acm = (pos[0] >> 4) & 0x01; 617 int acm = (pos[0] >> 4) & 0x01;
618 bool uapsd = false;
813 int queue; 619 int queue;
814 620
815 switch (aci) { 621 switch (aci) {
@@ -817,22 +623,30 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
817 queue = 3; 623 queue = 3;
818 if (acm) 624 if (acm)
819 local->wmm_acm |= BIT(1) | BIT(2); /* BK/- */ 625 local->wmm_acm |= BIT(1) | BIT(2); /* BK/- */
626 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
627 uapsd = true;
820 break; 628 break;
821 case 2: /* AC_VI */ 629 case 2: /* AC_VI */
822 queue = 1; 630 queue = 1;
823 if (acm) 631 if (acm)
824 local->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */ 632 local->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */
633 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
634 uapsd = true;
825 break; 635 break;
826 case 3: /* AC_VO */ 636 case 3: /* AC_VO */
827 queue = 0; 637 queue = 0;
828 if (acm) 638 if (acm)
829 local->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */ 639 local->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */
640 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
641 uapsd = true;
830 break; 642 break;
831 case 0: /* AC_BE */ 643 case 0: /* AC_BE */
832 default: 644 default:
833 queue = 2; 645 queue = 2;
834 if (acm) 646 if (acm)
835 local->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */ 647 local->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */
648 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
649 uapsd = true;
836 break; 650 break;
837 } 651 }
838 652
@@ -840,11 +654,14 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
840 params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4); 654 params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4);
841 params.cw_min = ecw2cw(pos[1] & 0x0f); 655 params.cw_min = ecw2cw(pos[1] & 0x0f);
842 params.txop = get_unaligned_le16(pos + 2); 656 params.txop = get_unaligned_le16(pos + 2);
657 params.uapsd = uapsd;
658
843#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 659#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
844 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " 660 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d "
845 "cWmin=%d cWmax=%d txop=%d\n", 661 "cWmin=%d cWmax=%d txop=%d uapsd=%d\n",
846 wiphy_name(local->hw.wiphy), queue, aci, acm, 662 wiphy_name(local->hw.wiphy), queue, aci, acm,
847 params.aifs, params.cw_min, params.cw_max, params.txop); 663 params.aifs, params.cw_min, params.cw_max, params.txop,
664 params.uapsd);
848#endif 665#endif
849 if (drv_conf_tx(local, queue, &params) && local->ops->conf_tx) 666 if (drv_conf_tx(local, queue, &params) && local->ops->conf_tx)
850 printk(KERN_DEBUG "%s: failed to set TX queue " 667 printk(KERN_DEBUG "%s: failed to set TX queue "
@@ -871,6 +688,8 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
871 } 688 }
872 689
873 use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME); 690 use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
691 if (sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ)
692 use_short_slot = true;
874 693
875 if (use_protection != bss_conf->use_cts_prot) { 694 if (use_protection != bss_conf->use_cts_prot) {
876 bss_conf->use_cts_prot = use_protection; 695 bss_conf->use_cts_prot = use_protection;
@@ -891,30 +710,36 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
891} 710}
892 711
893static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, 712static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
894 struct ieee80211_mgd_work *wk, 713 struct cfg80211_bss *cbss,
895 u32 bss_info_changed) 714 u32 bss_info_changed)
896{ 715{
716 struct ieee80211_bss *bss = (void *)cbss->priv;
897 struct ieee80211_local *local = sdata->local; 717 struct ieee80211_local *local = sdata->local;
898 struct ieee80211_bss *bss = wk->bss;
899 718
900 bss_info_changed |= BSS_CHANGED_ASSOC; 719 bss_info_changed |= BSS_CHANGED_ASSOC;
901 /* set timing information */ 720 /* set timing information */
902 sdata->vif.bss_conf.beacon_int = bss->cbss.beacon_interval; 721 sdata->vif.bss_conf.beacon_int = cbss->beacon_interval;
903 sdata->vif.bss_conf.timestamp = bss->cbss.tsf; 722 sdata->vif.bss_conf.timestamp = cbss->tsf;
904 sdata->vif.bss_conf.dtim_period = bss->dtim_period;
905 723
906 bss_info_changed |= BSS_CHANGED_BEACON_INT; 724 bss_info_changed |= BSS_CHANGED_BEACON_INT;
907 bss_info_changed |= ieee80211_handle_bss_capability(sdata, 725 bss_info_changed |= ieee80211_handle_bss_capability(sdata,
908 bss->cbss.capability, bss->has_erp_value, bss->erp_value); 726 cbss->capability, bss->has_erp_value, bss->erp_value);
909 727
910 sdata->u.mgd.associated = bss; 728 sdata->u.mgd.associated = cbss;
911 sdata->u.mgd.old_associate_work = wk; 729 memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN);
912 memcpy(sdata->u.mgd.bssid, bss->cbss.bssid, ETH_ALEN);
913 730
914 /* just to be sure */ 731 /* just to be sure */
915 sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | 732 sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
916 IEEE80211_STA_BEACON_POLL); 733 IEEE80211_STA_BEACON_POLL);
917 734
735 /*
736 * Always handle WMM once after association regardless
737 * of the first value the AP uses. Setting -1 here has
738 * that effect because the AP values is an unsigned
739 * 4-bit value.
740 */
741 sdata->u.mgd.wmm_last_param_set = -1;
742
918 ieee80211_led_assoc(local, 1); 743 ieee80211_led_assoc(local, 1);
919 744
920 sdata->vif.bss_conf.assoc = 1; 745 sdata->vif.bss_conf.assoc = 1;
@@ -932,99 +757,14 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
932 757
933 mutex_lock(&local->iflist_mtx); 758 mutex_lock(&local->iflist_mtx);
934 ieee80211_recalc_ps(local, -1); 759 ieee80211_recalc_ps(local, -1);
760 ieee80211_recalc_smps(local, sdata);
935 mutex_unlock(&local->iflist_mtx); 761 mutex_unlock(&local->iflist_mtx);
936 762
937 netif_start_queue(sdata->dev); 763 netif_tx_start_all_queues(sdata->dev);
938 netif_carrier_on(sdata->dev); 764 netif_carrier_on(sdata->dev);
939} 765}
940 766
941static enum rx_mgmt_action __must_check 767static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata)
942ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata,
943 struct ieee80211_mgd_work *wk)
944{
945 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
946 struct ieee80211_local *local = sdata->local;
947
948 wk->tries++;
949 if (wk->tries > IEEE80211_AUTH_MAX_TRIES) {
950 printk(KERN_DEBUG "%s: direct probe to AP %pM timed out\n",
951 sdata->dev->name, wk->bss->cbss.bssid);
952
953 /*
954 * Most likely AP is not in the range so remove the
955 * bss struct for that AP.
956 */
957 cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss);
958
959 /*
960 * We might have a pending scan which had no chance to run yet
961 * due to work needing to be done. Hence, queue the STAs work
962 * again for that.
963 */
964 ieee80211_queue_work(&local->hw, &ifmgd->work);
965 return RX_MGMT_CFG80211_AUTH_TO;
966 }
967
968 printk(KERN_DEBUG "%s: direct probe to AP %pM (try %d)\n",
969 sdata->dev->name, wk->bss->cbss.bssid,
970 wk->tries);
971
972 /*
973 * Direct probe is sent to broadcast address as some APs
974 * will not answer to direct packet in unassociated state.
975 */
976 ieee80211_send_probe_req(sdata, NULL, wk->ssid, wk->ssid_len, NULL, 0);
977
978 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
979 run_again(ifmgd, wk->timeout);
980
981 return RX_MGMT_NONE;
982}
983
984
985static enum rx_mgmt_action __must_check
986ieee80211_authenticate(struct ieee80211_sub_if_data *sdata,
987 struct ieee80211_mgd_work *wk)
988{
989 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
990 struct ieee80211_local *local = sdata->local;
991
992 wk->tries++;
993 if (wk->tries > IEEE80211_AUTH_MAX_TRIES) {
994 printk(KERN_DEBUG "%s: authentication with AP %pM"
995 " timed out\n",
996 sdata->dev->name, wk->bss->cbss.bssid);
997
998 /*
999 * Most likely AP is not in the range so remove the
1000 * bss struct for that AP.
1001 */
1002 cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss);
1003
1004 /*
1005 * We might have a pending scan which had no chance to run yet
1006 * due to work needing to be done. Hence, queue the STAs work
1007 * again for that.
1008 */
1009 ieee80211_queue_work(&local->hw, &ifmgd->work);
1010 return RX_MGMT_CFG80211_AUTH_TO;
1011 }
1012
1013 printk(KERN_DEBUG "%s: authenticate with AP %pM (try %d)\n",
1014 sdata->dev->name, wk->bss->cbss.bssid, wk->tries);
1015
1016 ieee80211_send_auth(sdata, 1, wk->auth_alg, wk->ie, wk->ie_len,
1017 wk->bss->cbss.bssid, NULL, 0, 0);
1018 wk->auth_transaction = 2;
1019
1020 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
1021 run_again(ifmgd, wk->timeout);
1022
1023 return RX_MGMT_NONE;
1024}
1025
1026static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1027 bool deauth)
1028{ 768{
1029 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 769 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1030 struct ieee80211_local *local = sdata->local; 770 struct ieee80211_local *local = sdata->local;
@@ -1037,21 +777,11 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1037 if (WARN_ON(!ifmgd->associated)) 777 if (WARN_ON(!ifmgd->associated))
1038 return; 778 return;
1039 779
1040 memcpy(bssid, ifmgd->associated->cbss.bssid, ETH_ALEN); 780 memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
1041 781
1042 ifmgd->associated = NULL; 782 ifmgd->associated = NULL;
1043 memset(ifmgd->bssid, 0, ETH_ALEN); 783 memset(ifmgd->bssid, 0, ETH_ALEN);
1044 784
1045 if (deauth) {
1046 kfree(ifmgd->old_associate_work);
1047 ifmgd->old_associate_work = NULL;
1048 } else {
1049 struct ieee80211_mgd_work *wk = ifmgd->old_associate_work;
1050
1051 wk->state = IEEE80211_MGD_STATE_IDLE;
1052 list_add(&wk->list, &ifmgd->work_list);
1053 }
1054
1055 /* 785 /*
1056 * we need to commit the associated = NULL change because the 786 * we need to commit the associated = NULL change because the
1057 * scan code uses that to determine whether this iface should 787 * scan code uses that to determine whether this iface should
@@ -1066,13 +796,15 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1066 * time -- we don't want the scan code to enable queues. 796 * time -- we don't want the scan code to enable queues.
1067 */ 797 */
1068 798
1069 netif_stop_queue(sdata->dev); 799 netif_tx_stop_all_queues(sdata->dev);
1070 netif_carrier_off(sdata->dev); 800 netif_carrier_off(sdata->dev);
1071 801
1072 rcu_read_lock(); 802 rcu_read_lock();
1073 sta = sta_info_get(local, bssid); 803 sta = sta_info_get(sdata, bssid);
1074 if (sta) 804 if (sta) {
805 set_sta_flags(sta, WLAN_STA_DISASSOC);
1075 ieee80211_sta_tear_down_BA_sessions(sta); 806 ieee80211_sta_tear_down_BA_sessions(sta);
807 }
1076 rcu_read_unlock(); 808 rcu_read_unlock();
1077 809
1078 changed |= ieee80211_reset_erp_info(sdata); 810 changed |= ieee80211_reset_erp_info(sdata);
@@ -1105,57 +837,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1105 changed |= BSS_CHANGED_BSSID; 837 changed |= BSS_CHANGED_BSSID;
1106 ieee80211_bss_info_change_notify(sdata, changed); 838 ieee80211_bss_info_change_notify(sdata, changed);
1107 839
1108 rcu_read_lock(); 840 sta_info_destroy_addr(sdata, bssid);
1109
1110 sta = sta_info_get(local, bssid);
1111 if (!sta) {
1112 rcu_read_unlock();
1113 return;
1114 }
1115
1116 sta_info_unlink(&sta);
1117
1118 rcu_read_unlock();
1119
1120 sta_info_destroy(sta);
1121}
1122
1123static enum rx_mgmt_action __must_check
1124ieee80211_associate(struct ieee80211_sub_if_data *sdata,
1125 struct ieee80211_mgd_work *wk)
1126{
1127 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1128 struct ieee80211_local *local = sdata->local;
1129
1130 wk->tries++;
1131 if (wk->tries > IEEE80211_ASSOC_MAX_TRIES) {
1132 printk(KERN_DEBUG "%s: association with AP %pM"
1133 " timed out\n",
1134 sdata->dev->name, wk->bss->cbss.bssid);
1135
1136 /*
1137 * Most likely AP is not in the range so remove the
1138 * bss struct for that AP.
1139 */
1140 cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss);
1141
1142 /*
1143 * We might have a pending scan which had no chance to run yet
1144 * due to work needing to be done. Hence, queue the STAs work
1145 * again for that.
1146 */
1147 ieee80211_queue_work(&local->hw, &ifmgd->work);
1148 return RX_MGMT_CFG80211_ASSOC_TO;
1149 }
1150
1151 printk(KERN_DEBUG "%s: associate with AP %pM (try %d)\n",
1152 sdata->dev->name, wk->bss->cbss.bssid, wk->tries);
1153 ieee80211_send_assoc(sdata, wk);
1154
1155 wk->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
1156 run_again(ifmgd, wk->timeout);
1157
1158 return RX_MGMT_NONE;
1159} 841}
1160 842
1161void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, 843void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
@@ -1181,8 +863,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
1181 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 863 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1182 const u8 *ssid; 864 const u8 *ssid;
1183 865
1184 ssid = ieee80211_bss_get_ie(&ifmgd->associated->cbss, WLAN_EID_SSID); 866 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
1185 ieee80211_send_probe_req(sdata, ifmgd->associated->cbss.bssid, 867 ieee80211_send_probe_req(sdata, ifmgd->associated->bssid,
1186 ssid + 2, ssid[1], NULL, 0); 868 ssid + 2, ssid[1], NULL, 0);
1187 869
1188 ifmgd->probe_send_count++; 870 ifmgd->probe_send_count++;
@@ -1196,12 +878,15 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
1196 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 878 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1197 bool already = false; 879 bool already = false;
1198 880
1199 if (!netif_running(sdata->dev)) 881 if (!ieee80211_sdata_running(sdata))
1200 return; 882 return;
1201 883
1202 if (sdata->local->scanning) 884 if (sdata->local->scanning)
1203 return; 885 return;
1204 886
887 if (sdata->local->tmp_channel)
888 return;
889
1205 mutex_lock(&ifmgd->mtx); 890 mutex_lock(&ifmgd->mtx);
1206 891
1207 if (!ifmgd->associated) 892 if (!ifmgd->associated)
@@ -1210,7 +895,7 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
1210#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 895#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1211 if (beacon && net_ratelimit()) 896 if (beacon && net_ratelimit())
1212 printk(KERN_DEBUG "%s: detected beacon loss from AP " 897 printk(KERN_DEBUG "%s: detected beacon loss from AP "
1213 "- sending probe request\n", sdata->dev->name); 898 "- sending probe request\n", sdata->name);
1214#endif 899#endif
1215 900
1216 /* 901 /*
@@ -1263,88 +948,8 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif)
1263} 948}
1264EXPORT_SYMBOL(ieee80211_beacon_loss); 949EXPORT_SYMBOL(ieee80211_beacon_loss);
1265 950
1266static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata,
1267 struct ieee80211_mgd_work *wk)
1268{
1269 wk->state = IEEE80211_MGD_STATE_IDLE;
1270 printk(KERN_DEBUG "%s: authenticated\n", sdata->dev->name);
1271}
1272
1273
1274static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
1275 struct ieee80211_mgd_work *wk,
1276 struct ieee80211_mgmt *mgmt,
1277 size_t len)
1278{
1279 u8 *pos;
1280 struct ieee802_11_elems elems;
1281
1282 pos = mgmt->u.auth.variable;
1283 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1284 if (!elems.challenge)
1285 return;
1286 ieee80211_send_auth(sdata, 3, wk->auth_alg,
1287 elems.challenge - 2, elems.challenge_len + 2,
1288 wk->bss->cbss.bssid,
1289 wk->key, wk->key_len, wk->key_idx);
1290 wk->auth_transaction = 4;
1291}
1292
1293static enum rx_mgmt_action __must_check
1294ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1295 struct ieee80211_mgd_work *wk,
1296 struct ieee80211_mgmt *mgmt, size_t len)
1297{
1298 u16 auth_alg, auth_transaction, status_code;
1299
1300 if (wk->state != IEEE80211_MGD_STATE_AUTH)
1301 return RX_MGMT_NONE;
1302
1303 if (len < 24 + 6)
1304 return RX_MGMT_NONE;
1305
1306 if (memcmp(wk->bss->cbss.bssid, mgmt->sa, ETH_ALEN) != 0)
1307 return RX_MGMT_NONE;
1308
1309 if (memcmp(wk->bss->cbss.bssid, mgmt->bssid, ETH_ALEN) != 0)
1310 return RX_MGMT_NONE;
1311
1312 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
1313 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
1314 status_code = le16_to_cpu(mgmt->u.auth.status_code);
1315
1316 if (auth_alg != wk->auth_alg ||
1317 auth_transaction != wk->auth_transaction)
1318 return RX_MGMT_NONE;
1319
1320 if (status_code != WLAN_STATUS_SUCCESS) {
1321 list_del(&wk->list);
1322 kfree(wk);
1323 return RX_MGMT_CFG80211_AUTH;
1324 }
1325
1326 switch (wk->auth_alg) {
1327 case WLAN_AUTH_OPEN:
1328 case WLAN_AUTH_LEAP:
1329 case WLAN_AUTH_FT:
1330 ieee80211_auth_completed(sdata, wk);
1331 return RX_MGMT_CFG80211_AUTH;
1332 case WLAN_AUTH_SHARED_KEY:
1333 if (wk->auth_transaction == 4) {
1334 ieee80211_auth_completed(sdata, wk);
1335 return RX_MGMT_CFG80211_AUTH;
1336 } else
1337 ieee80211_auth_challenge(sdata, wk, mgmt, len);
1338 break;
1339 }
1340
1341 return RX_MGMT_NONE;
1342}
1343
1344
1345static enum rx_mgmt_action __must_check 951static enum rx_mgmt_action __must_check
1346ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, 952ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1347 struct ieee80211_mgd_work *wk,
1348 struct ieee80211_mgmt *mgmt, size_t len) 953 struct ieee80211_mgmt *mgmt, size_t len)
1349{ 954{
1350 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 955 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -1356,23 +961,15 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1356 961
1357 ASSERT_MGD_MTX(ifmgd); 962 ASSERT_MGD_MTX(ifmgd);
1358 963
1359 if (wk) 964 bssid = ifmgd->associated->bssid;
1360 bssid = wk->bss->cbss.bssid;
1361 else
1362 bssid = ifmgd->associated->cbss.bssid;
1363 965
1364 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); 966 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
1365 967
1366 printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n", 968 printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n",
1367 sdata->dev->name, bssid, reason_code); 969 sdata->name, bssid, reason_code);
1368 970
1369 if (!wk) { 971 ieee80211_set_disassoc(sdata);
1370 ieee80211_set_disassoc(sdata, true); 972 ieee80211_recalc_idle(sdata->local);
1371 ieee80211_recalc_idle(sdata->local);
1372 } else {
1373 list_del(&wk->list);
1374 kfree(wk);
1375 }
1376 973
1377 return RX_MGMT_CFG80211_DEAUTH; 974 return RX_MGMT_CFG80211_DEAUTH;
1378} 975}
@@ -1393,123 +990,72 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1393 if (WARN_ON(!ifmgd->associated)) 990 if (WARN_ON(!ifmgd->associated))
1394 return RX_MGMT_NONE; 991 return RX_MGMT_NONE;
1395 992
1396 if (WARN_ON(memcmp(ifmgd->associated->cbss.bssid, mgmt->sa, ETH_ALEN))) 993 if (WARN_ON(memcmp(ifmgd->associated->bssid, mgmt->sa, ETH_ALEN)))
1397 return RX_MGMT_NONE; 994 return RX_MGMT_NONE;
1398 995
1399 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); 996 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
1400 997
1401 printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n", 998 printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n",
1402 sdata->dev->name, mgmt->sa, reason_code); 999 sdata->name, mgmt->sa, reason_code);
1403 1000
1404 ieee80211_set_disassoc(sdata, false); 1001 ieee80211_set_disassoc(sdata);
1405 ieee80211_recalc_idle(sdata->local); 1002 ieee80211_recalc_idle(sdata->local);
1406 return RX_MGMT_CFG80211_DISASSOC; 1003 return RX_MGMT_CFG80211_DISASSOC;
1407} 1004}
1408 1005
1409 1006
1410static enum rx_mgmt_action __must_check 1007static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1411ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, 1008 struct ieee80211_mgmt *mgmt, size_t len)
1412 struct ieee80211_mgd_work *wk,
1413 struct ieee80211_mgmt *mgmt, size_t len,
1414 bool reassoc)
1415{ 1009{
1010 struct ieee80211_sub_if_data *sdata = wk->sdata;
1416 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1011 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1417 struct ieee80211_local *local = sdata->local; 1012 struct ieee80211_local *local = sdata->local;
1418 struct ieee80211_supported_band *sband; 1013 struct ieee80211_supported_band *sband;
1419 struct sta_info *sta; 1014 struct sta_info *sta;
1015 struct cfg80211_bss *cbss = wk->assoc.bss;
1016 u8 *pos;
1420 u32 rates, basic_rates; 1017 u32 rates, basic_rates;
1421 u16 capab_info, status_code, aid; 1018 u16 capab_info, aid;
1422 struct ieee802_11_elems elems; 1019 struct ieee802_11_elems elems;
1423 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; 1020 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
1424 u8 *pos;
1425 u32 changed = 0; 1021 u32 changed = 0;
1426 int i, j; 1022 int i, j, err;
1427 bool have_higher_than_11mbit = false, newsta = false; 1023 bool have_higher_than_11mbit = false;
1428 u16 ap_ht_cap_flags; 1024 u16 ap_ht_cap_flags;
1429 1025
1430 /* 1026 /* AssocResp and ReassocResp have identical structure */
1431 * AssocResp and ReassocResp have identical structure, so process both
1432 * of them in this function.
1433 */
1434
1435 if (len < 24 + 6)
1436 return RX_MGMT_NONE;
1437 1027
1438 if (memcmp(wk->bss->cbss.bssid, mgmt->sa, ETH_ALEN) != 0)
1439 return RX_MGMT_NONE;
1440
1441 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
1442 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
1443 aid = le16_to_cpu(mgmt->u.assoc_resp.aid); 1028 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
1444 1029 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
1445 printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
1446 "status=%d aid=%d)\n",
1447 sdata->dev->name, reassoc ? "Rea" : "A", mgmt->sa,
1448 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
1449
1450 pos = mgmt->u.assoc_resp.variable;
1451 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1452
1453 if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
1454 elems.timeout_int && elems.timeout_int_len == 5 &&
1455 elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) {
1456 u32 tu, ms;
1457 tu = get_unaligned_le32(elems.timeout_int + 1);
1458 ms = tu * 1024 / 1000;
1459 printk(KERN_DEBUG "%s: AP rejected association temporarily; "
1460 "comeback duration %u TU (%u ms)\n",
1461 sdata->dev->name, tu, ms);
1462 wk->timeout = jiffies + msecs_to_jiffies(ms);
1463 if (ms > IEEE80211_ASSOC_TIMEOUT)
1464 run_again(ifmgd, jiffies + msecs_to_jiffies(ms));
1465 return RX_MGMT_NONE;
1466 }
1467
1468 if (status_code != WLAN_STATUS_SUCCESS) {
1469 printk(KERN_DEBUG "%s: AP denied association (code=%d)\n",
1470 sdata->dev->name, status_code);
1471 wk->state = IEEE80211_MGD_STATE_IDLE;
1472 return RX_MGMT_CFG80211_ASSOC;
1473 }
1474 1030
1475 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) 1031 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
1476 printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not " 1032 printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not "
1477 "set\n", sdata->dev->name, aid); 1033 "set\n", sdata->name, aid);
1478 aid &= ~(BIT(15) | BIT(14)); 1034 aid &= ~(BIT(15) | BIT(14));
1479 1035
1036 pos = mgmt->u.assoc_resp.variable;
1037 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1038
1480 if (!elems.supp_rates) { 1039 if (!elems.supp_rates) {
1481 printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n", 1040 printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n",
1482 sdata->dev->name); 1041 sdata->name);
1483 return RX_MGMT_NONE; 1042 return false;
1484 } 1043 }
1485 1044
1486 printk(KERN_DEBUG "%s: associated\n", sdata->dev->name);
1487 ifmgd->aid = aid; 1045 ifmgd->aid = aid;
1488 1046
1489 rcu_read_lock(); 1047 sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
1490
1491 /* Add STA entry for the AP */
1492 sta = sta_info_get(local, wk->bss->cbss.bssid);
1493 if (!sta) { 1048 if (!sta) {
1494 newsta = true; 1049 printk(KERN_DEBUG "%s: failed to alloc STA entry for"
1495 1050 " the AP\n", sdata->name);
1496 rcu_read_unlock(); 1051 return false;
1497
1498 sta = sta_info_alloc(sdata, wk->bss->cbss.bssid, GFP_KERNEL);
1499 if (!sta) {
1500 printk(KERN_DEBUG "%s: failed to alloc STA entry for"
1501 " the AP\n", sdata->dev->name);
1502 return RX_MGMT_NONE;
1503 }
1504
1505 set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC |
1506 WLAN_STA_ASSOC_AP);
1507 if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
1508 set_sta_flags(sta, WLAN_STA_AUTHORIZED);
1509
1510 rcu_read_lock();
1511 } 1052 }
1512 1053
1054 set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC |
1055 WLAN_STA_ASSOC_AP);
1056 if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
1057 set_sta_flags(sta, WLAN_STA_AUTHORIZED);
1058
1513 rates = 0; 1059 rates = 0;
1514 basic_rates = 0; 1060 basic_rates = 0;
1515 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1061 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
@@ -1572,40 +1118,40 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1572 if (elems.wmm_param) 1118 if (elems.wmm_param)
1573 set_sta_flags(sta, WLAN_STA_WME); 1119 set_sta_flags(sta, WLAN_STA_WME);
1574 1120
1575 if (newsta) { 1121 err = sta_info_insert(sta);
1576 int err = sta_info_insert(sta); 1122 sta = NULL;
1577 if (err) { 1123 if (err) {
1578 printk(KERN_DEBUG "%s: failed to insert STA entry for" 1124 printk(KERN_DEBUG "%s: failed to insert STA entry for"
1579 " the AP (error %d)\n", sdata->dev->name, err); 1125 " the AP (error %d)\n", sdata->name, err);
1580 rcu_read_unlock(); 1126 return false;
1581 return RX_MGMT_NONE;
1582 }
1583 } 1127 }
1584 1128
1585 rcu_read_unlock();
1586
1587 if (elems.wmm_param) 1129 if (elems.wmm_param)
1588 ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param, 1130 ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param,
1589 elems.wmm_param_len); 1131 elems.wmm_param_len);
1590 else 1132 else
1591 ieee80211_set_wmm_default(sdata); 1133 ieee80211_set_wmm_default(sdata);
1592 1134
1135 local->oper_channel = wk->chan;
1136
1593 if (elems.ht_info_elem && elems.wmm_param && 1137 if (elems.ht_info_elem && elems.wmm_param &&
1594 (ifmgd->flags & IEEE80211_STA_WMM_ENABLED) && 1138 (sdata->local->hw.queues >= 4) &&
1595 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) 1139 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
1596 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, 1140 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
1597 wk->bss->cbss.bssid, 1141 cbss->bssid, ap_ht_cap_flags);
1598 ap_ht_cap_flags);
1599
1600 /* delete work item -- must be before set_associated for PS */
1601 list_del(&wk->list);
1602 1142
1603 /* set AID and assoc capability, 1143 /* set AID and assoc capability,
1604 * ieee80211_set_associated() will tell the driver */ 1144 * ieee80211_set_associated() will tell the driver */
1605 bss_conf->aid = aid; 1145 bss_conf->aid = aid;
1606 bss_conf->assoc_capability = capab_info; 1146 bss_conf->assoc_capability = capab_info;
1607 /* this will take ownership of wk */ 1147 ieee80211_set_associated(sdata, cbss, changed);
1608 ieee80211_set_associated(sdata, wk, changed); 1148
1149 /*
1150 * If we're using 4-addr mode, let the AP know that we're
1151 * doing so, so that it can create the STA VLAN on its side
1152 */
1153 if (ifmgd->use_4addr)
1154 ieee80211_send_4addr_nullfunc(local, sdata);
1609 1155
1610 /* 1156 /*
1611 * Start timer to probe the connection to the AP now. 1157 * Start timer to probe the connection to the AP now.
@@ -1614,7 +1160,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1614 ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt); 1160 ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt);
1615 mod_beacon_timer(sdata); 1161 mod_beacon_timer(sdata);
1616 1162
1617 return RX_MGMT_CFG80211_ASSOC; 1163 return true;
1618} 1164}
1619 1165
1620 1166
@@ -1629,6 +1175,13 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1629 int freq; 1175 int freq;
1630 struct ieee80211_bss *bss; 1176 struct ieee80211_bss *bss;
1631 struct ieee80211_channel *channel; 1177 struct ieee80211_channel *channel;
1178 bool need_ps = false;
1179
1180 if (sdata->u.mgd.associated) {
1181 bss = (void *)sdata->u.mgd.associated->priv;
1182 /* not previously set so we may need to recalc */
1183 need_ps = !bss->dtim_period;
1184 }
1632 1185
1633 if (elems->ds_params && elems->ds_params_len == 1) 1186 if (elems->ds_params && elems->ds_params_len == 1)
1634 freq = ieee80211_channel_to_frequency(elems->ds_params[0]); 1187 freq = ieee80211_channel_to_frequency(elems->ds_params[0]);
@@ -1648,8 +1201,14 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1648 if (!sdata->u.mgd.associated) 1201 if (!sdata->u.mgd.associated)
1649 return; 1202 return;
1650 1203
1204 if (need_ps) {
1205 mutex_lock(&local->iflist_mtx);
1206 ieee80211_recalc_ps(local, -1);
1207 mutex_unlock(&local->iflist_mtx);
1208 }
1209
1651 if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) && 1210 if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) &&
1652 (memcmp(mgmt->bssid, sdata->u.mgd.associated->cbss.bssid, 1211 (memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid,
1653 ETH_ALEN) == 0)) { 1212 ETH_ALEN) == 0)) {
1654 struct ieee80211_channel_sw_ie *sw_elem = 1213 struct ieee80211_channel_sw_ie *sw_elem =
1655 (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem; 1214 (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem;
@@ -1659,19 +1218,19 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1659 1218
1660 1219
1661static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, 1220static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1662 struct ieee80211_mgd_work *wk, 1221 struct sk_buff *skb)
1663 struct ieee80211_mgmt *mgmt, size_t len,
1664 struct ieee80211_rx_status *rx_status)
1665{ 1222{
1223 struct ieee80211_mgmt *mgmt = (void *)skb->data;
1666 struct ieee80211_if_managed *ifmgd; 1224 struct ieee80211_if_managed *ifmgd;
1667 size_t baselen; 1225 struct ieee80211_rx_status *rx_status = (void *) skb->cb;
1226 size_t baselen, len = skb->len;
1668 struct ieee802_11_elems elems; 1227 struct ieee802_11_elems elems;
1669 1228
1670 ifmgd = &sdata->u.mgd; 1229 ifmgd = &sdata->u.mgd;
1671 1230
1672 ASSERT_MGD_MTX(ifmgd); 1231 ASSERT_MGD_MTX(ifmgd);
1673 1232
1674 if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) 1233 if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
1675 return; /* ignore ProbeResp to foreign address */ 1234 return; /* ignore ProbeResp to foreign address */
1676 1235
1677 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; 1236 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -1683,17 +1242,8 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1683 1242
1684 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false); 1243 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false);
1685 1244
1686 /* direct probe may be part of the association flow */
1687 if (wk && wk->state == IEEE80211_MGD_STATE_PROBE) {
1688 printk(KERN_DEBUG "%s: direct probe responded\n",
1689 sdata->dev->name);
1690 wk->tries = 0;
1691 wk->state = IEEE80211_MGD_STATE_AUTH;
1692 WARN_ON(ieee80211_authenticate(sdata, wk) != RX_MGMT_NONE);
1693 }
1694
1695 if (ifmgd->associated && 1245 if (ifmgd->associated &&
1696 memcmp(mgmt->bssid, ifmgd->associated->cbss.bssid, ETH_ALEN) == 0 && 1246 memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN) == 0 &&
1697 ifmgd->flags & (IEEE80211_STA_BEACON_POLL | 1247 ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
1698 IEEE80211_STA_CONNECTION_POLL)) { 1248 IEEE80211_STA_CONNECTION_POLL)) {
1699 ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | 1249 ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
@@ -1766,7 +1316,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1766 if (!ifmgd->associated) 1316 if (!ifmgd->associated)
1767 return; 1317 return;
1768 1318
1769 bssid = ifmgd->associated->cbss.bssid; 1319 bssid = ifmgd->associated->bssid;
1770 1320
1771 /* 1321 /*
1772 * And in theory even frames from a different AP we were just 1322 * And in theory even frames from a different AP we were just
@@ -1779,7 +1329,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1779#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1329#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1780 if (net_ratelimit()) { 1330 if (net_ratelimit()) {
1781 printk(KERN_DEBUG "%s: cancelling probereq poll due " 1331 printk(KERN_DEBUG "%s: cancelling probereq poll due "
1782 "to a received beacon\n", sdata->dev->name); 1332 "to a received beacon\n", sdata->name);
1783 } 1333 }
1784#endif 1334#endif
1785 ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL; 1335 ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
@@ -1857,7 +1407,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1857 1407
1858 rcu_read_lock(); 1408 rcu_read_lock();
1859 1409
1860 sta = sta_info_get(local, bssid); 1410 sta = sta_info_get(sdata, bssid);
1861 if (WARN_ON(!sta)) { 1411 if (WARN_ON(!sta)) {
1862 rcu_read_unlock(); 1412 rcu_read_unlock();
1863 return; 1413 return;
@@ -1905,9 +1455,6 @@ ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
1905 switch (fc & IEEE80211_FCTL_STYPE) { 1455 switch (fc & IEEE80211_FCTL_STYPE) {
1906 case IEEE80211_STYPE_PROBE_RESP: 1456 case IEEE80211_STYPE_PROBE_RESP:
1907 case IEEE80211_STYPE_BEACON: 1457 case IEEE80211_STYPE_BEACON:
1908 case IEEE80211_STYPE_AUTH:
1909 case IEEE80211_STYPE_ASSOC_RESP:
1910 case IEEE80211_STYPE_REASSOC_RESP:
1911 case IEEE80211_STYPE_DEAUTH: 1458 case IEEE80211_STYPE_DEAUTH:
1912 case IEEE80211_STYPE_DISASSOC: 1459 case IEEE80211_STYPE_DISASSOC:
1913 case IEEE80211_STYPE_ACTION: 1460 case IEEE80211_STYPE_ACTION:
@@ -1925,7 +1472,6 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1925 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1472 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1926 struct ieee80211_rx_status *rx_status; 1473 struct ieee80211_rx_status *rx_status;
1927 struct ieee80211_mgmt *mgmt; 1474 struct ieee80211_mgmt *mgmt;
1928 struct ieee80211_mgd_work *wk;
1929 enum rx_mgmt_action rma = RX_MGMT_NONE; 1475 enum rx_mgmt_action rma = RX_MGMT_NONE;
1930 u16 fc; 1476 u16 fc;
1931 1477
@@ -1936,29 +1482,28 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1936 mutex_lock(&ifmgd->mtx); 1482 mutex_lock(&ifmgd->mtx);
1937 1483
1938 if (ifmgd->associated && 1484 if (ifmgd->associated &&
1939 memcmp(ifmgd->associated->cbss.bssid, mgmt->bssid, 1485 memcmp(ifmgd->associated->bssid, mgmt->bssid, ETH_ALEN) == 0) {
1940 ETH_ALEN) == 0) {
1941 switch (fc & IEEE80211_FCTL_STYPE) { 1486 switch (fc & IEEE80211_FCTL_STYPE) {
1942 case IEEE80211_STYPE_BEACON: 1487 case IEEE80211_STYPE_BEACON:
1943 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, 1488 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len,
1944 rx_status); 1489 rx_status);
1945 break; 1490 break;
1946 case IEEE80211_STYPE_PROBE_RESP: 1491 case IEEE80211_STYPE_PROBE_RESP:
1947 ieee80211_rx_mgmt_probe_resp(sdata, NULL, mgmt, 1492 ieee80211_rx_mgmt_probe_resp(sdata, skb);
1948 skb->len, rx_status);
1949 break; 1493 break;
1950 case IEEE80211_STYPE_DEAUTH: 1494 case IEEE80211_STYPE_DEAUTH:
1951 rma = ieee80211_rx_mgmt_deauth(sdata, NULL, 1495 rma = ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len);
1952 mgmt, skb->len);
1953 break; 1496 break;
1954 case IEEE80211_STYPE_DISASSOC: 1497 case IEEE80211_STYPE_DISASSOC:
1955 rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); 1498 rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
1956 break; 1499 break;
1957 case IEEE80211_STYPE_ACTION: 1500 case IEEE80211_STYPE_ACTION:
1958 /* XXX: differentiate, can only happen for CSA now! */ 1501 if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
1502 break;
1503
1959 ieee80211_sta_process_chanswitch(sdata, 1504 ieee80211_sta_process_chanswitch(sdata,
1960 &mgmt->u.action.u.chan_switch.sw_elem, 1505 &mgmt->u.action.u.chan_switch.sw_elem,
1961 ifmgd->associated); 1506 (void *)ifmgd->associated->priv);
1962 break; 1507 break;
1963 } 1508 }
1964 mutex_unlock(&ifmgd->mtx); 1509 mutex_unlock(&ifmgd->mtx);
@@ -1979,58 +1524,11 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1979 goto out; 1524 goto out;
1980 } 1525 }
1981 1526
1982 list_for_each_entry(wk, &ifmgd->work_list, list) {
1983 if (memcmp(wk->bss->cbss.bssid, mgmt->bssid, ETH_ALEN) != 0)
1984 continue;
1985
1986 switch (fc & IEEE80211_FCTL_STYPE) {
1987 case IEEE80211_STYPE_PROBE_RESP:
1988 ieee80211_rx_mgmt_probe_resp(sdata, wk, mgmt, skb->len,
1989 rx_status);
1990 break;
1991 case IEEE80211_STYPE_AUTH:
1992 rma = ieee80211_rx_mgmt_auth(sdata, wk, mgmt, skb->len);
1993 break;
1994 case IEEE80211_STYPE_ASSOC_RESP:
1995 rma = ieee80211_rx_mgmt_assoc_resp(sdata, wk, mgmt,
1996 skb->len, false);
1997 break;
1998 case IEEE80211_STYPE_REASSOC_RESP:
1999 rma = ieee80211_rx_mgmt_assoc_resp(sdata, wk, mgmt,
2000 skb->len, true);
2001 break;
2002 case IEEE80211_STYPE_DEAUTH:
2003 rma = ieee80211_rx_mgmt_deauth(sdata, wk, mgmt,
2004 skb->len);
2005 break;
2006 }
2007 /*
2008 * We've processed this frame for that work, so it can't
2009 * belong to another work struct.
2010 * NB: this is also required for correctness because the
2011 * called functions can free 'wk', and for 'rma'!
2012 */
2013 break;
2014 }
2015
2016 mutex_unlock(&ifmgd->mtx); 1527 mutex_unlock(&ifmgd->mtx);
2017 1528
2018 switch (rma) { 1529 if (skb->len >= 24 + 2 /* mgmt + deauth reason */ &&
2019 case RX_MGMT_NONE: 1530 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH)
2020 /* no action */
2021 break;
2022 case RX_MGMT_CFG80211_AUTH:
2023 cfg80211_send_rx_auth(sdata->dev, (u8 *) mgmt, skb->len);
2024 break;
2025 case RX_MGMT_CFG80211_ASSOC:
2026 cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, skb->len);
2027 break;
2028 case RX_MGMT_CFG80211_DEAUTH:
2029 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); 1531 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
2030 break;
2031 default:
2032 WARN(1, "unexpected: %d", rma);
2033 }
2034 1532
2035 out: 1533 out:
2036 kfree_skb(skb); 1534 kfree_skb(skb);
@@ -2058,12 +1556,8 @@ static void ieee80211_sta_work(struct work_struct *work)
2058 struct ieee80211_local *local = sdata->local; 1556 struct ieee80211_local *local = sdata->local;
2059 struct ieee80211_if_managed *ifmgd; 1557 struct ieee80211_if_managed *ifmgd;
2060 struct sk_buff *skb; 1558 struct sk_buff *skb;
2061 struct ieee80211_mgd_work *wk, *tmp;
2062 LIST_HEAD(free_work);
2063 enum rx_mgmt_action rma;
2064 bool anybusy = false;
2065 1559
2066 if (!netif_running(sdata->dev)) 1560 if (!ieee80211_sdata_running(sdata))
2067 return; 1561 return;
2068 1562
2069 if (local->scanning) 1563 if (local->scanning)
@@ -2094,7 +1588,7 @@ static void ieee80211_sta_work(struct work_struct *work)
2094 ifmgd->associated) { 1588 ifmgd->associated) {
2095 u8 bssid[ETH_ALEN]; 1589 u8 bssid[ETH_ALEN];
2096 1590
2097 memcpy(bssid, ifmgd->associated->cbss.bssid, ETH_ALEN); 1591 memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
2098 if (time_is_after_jiffies(ifmgd->probe_timeout)) 1592 if (time_is_after_jiffies(ifmgd->probe_timeout))
2099 run_again(ifmgd, ifmgd->probe_timeout); 1593 run_again(ifmgd, ifmgd->probe_timeout);
2100 1594
@@ -2116,7 +1610,7 @@ static void ieee80211_sta_work(struct work_struct *work)
2116 printk(KERN_DEBUG "No probe response from AP %pM" 1610 printk(KERN_DEBUG "No probe response from AP %pM"
2117 " after %dms, disconnecting.\n", 1611 " after %dms, disconnecting.\n",
2118 bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ); 1612 bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
2119 ieee80211_set_disassoc(sdata, true); 1613 ieee80211_set_disassoc(sdata);
2120 ieee80211_recalc_idle(local); 1614 ieee80211_recalc_idle(local);
2121 mutex_unlock(&ifmgd->mtx); 1615 mutex_unlock(&ifmgd->mtx);
2122 /* 1616 /*
@@ -2131,87 +1625,7 @@ static void ieee80211_sta_work(struct work_struct *work)
2131 } 1625 }
2132 } 1626 }
2133 1627
2134
2135 ieee80211_recalc_idle(local);
2136
2137 list_for_each_entry_safe(wk, tmp, &ifmgd->work_list, list) {
2138 if (time_is_after_jiffies(wk->timeout)) {
2139 /*
2140 * This work item isn't supposed to be worked on
2141 * right now, but take care to adjust the timer
2142 * properly.
2143 */
2144 run_again(ifmgd, wk->timeout);
2145 continue;
2146 }
2147
2148 switch (wk->state) {
2149 default:
2150 WARN_ON(1);
2151 /* fall through */
2152 case IEEE80211_MGD_STATE_IDLE:
2153 /* nothing */
2154 rma = RX_MGMT_NONE;
2155 break;
2156 case IEEE80211_MGD_STATE_PROBE:
2157 rma = ieee80211_direct_probe(sdata, wk);
2158 break;
2159 case IEEE80211_MGD_STATE_AUTH:
2160 rma = ieee80211_authenticate(sdata, wk);
2161 break;
2162 case IEEE80211_MGD_STATE_ASSOC:
2163 rma = ieee80211_associate(sdata, wk);
2164 break;
2165 }
2166
2167 switch (rma) {
2168 case RX_MGMT_NONE:
2169 /* no action required */
2170 break;
2171 case RX_MGMT_CFG80211_AUTH_TO:
2172 case RX_MGMT_CFG80211_ASSOC_TO:
2173 list_del(&wk->list);
2174 list_add(&wk->list, &free_work);
2175 wk->tries = rma; /* small abuse but only local */
2176 break;
2177 default:
2178 WARN(1, "unexpected: %d", rma);
2179 }
2180 }
2181
2182 list_for_each_entry(wk, &ifmgd->work_list, list) {
2183 if (wk->state != IEEE80211_MGD_STATE_IDLE) {
2184 anybusy = true;
2185 break;
2186 }
2187 }
2188 if (!anybusy &&
2189 test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request))
2190 ieee80211_queue_delayed_work(&local->hw,
2191 &local->scan_work,
2192 round_jiffies_relative(0));
2193
2194 mutex_unlock(&ifmgd->mtx); 1628 mutex_unlock(&ifmgd->mtx);
2195
2196 list_for_each_entry_safe(wk, tmp, &free_work, list) {
2197 switch (wk->tries) {
2198 case RX_MGMT_CFG80211_AUTH_TO:
2199 cfg80211_send_auth_timeout(sdata->dev,
2200 wk->bss->cbss.bssid);
2201 break;
2202 case RX_MGMT_CFG80211_ASSOC_TO:
2203 cfg80211_send_assoc_timeout(sdata->dev,
2204 wk->bss->cbss.bssid);
2205 break;
2206 default:
2207 WARN(1, "unexpected: %d", wk->tries);
2208 }
2209
2210 list_del(&wk->list);
2211 kfree(wk);
2212 }
2213
2214 ieee80211_recalc_idle(local);
2215} 1629}
2216 1630
2217static void ieee80211_sta_bcn_mon_timer(unsigned long data) 1631static void ieee80211_sta_bcn_mon_timer(unsigned long data)
@@ -2320,14 +1734,14 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
2320 (unsigned long) sdata); 1734 (unsigned long) sdata);
2321 skb_queue_head_init(&ifmgd->skb_queue); 1735 skb_queue_head_init(&ifmgd->skb_queue);
2322 1736
2323 INIT_LIST_HEAD(&ifmgd->work_list);
2324
2325 ifmgd->capab = WLAN_CAPABILITY_ESS;
2326 ifmgd->flags = 0; 1737 ifmgd->flags = 0;
2327 if (sdata->local->hw.queues >= 4)
2328 ifmgd->flags |= IEEE80211_STA_WMM_ENABLED;
2329 1738
2330 mutex_init(&ifmgd->mtx); 1739 mutex_init(&ifmgd->mtx);
1740
1741 if (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
1742 ifmgd->req_smps = IEEE80211_SMPS_AUTOMATIC;
1743 else
1744 ifmgd->req_smps = IEEE80211_SMPS_OFF;
2331} 1745}
2332 1746
2333/* scan finished notification */ 1747/* scan finished notification */
@@ -2358,12 +1772,34 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
2358} 1772}
2359 1773
2360/* config hooks */ 1774/* config hooks */
1775static enum work_done_result
1776ieee80211_probe_auth_done(struct ieee80211_work *wk,
1777 struct sk_buff *skb)
1778{
1779 if (!skb) {
1780 cfg80211_send_auth_timeout(wk->sdata->dev, wk->filter_ta);
1781 return WORK_DONE_DESTROY;
1782 }
1783
1784 if (wk->type == IEEE80211_WORK_AUTH) {
1785 cfg80211_send_rx_auth(wk->sdata->dev, skb->data, skb->len);
1786 return WORK_DONE_DESTROY;
1787 }
1788
1789 mutex_lock(&wk->sdata->u.mgd.mtx);
1790 ieee80211_rx_mgmt_probe_resp(wk->sdata, skb);
1791 mutex_unlock(&wk->sdata->u.mgd.mtx);
1792
1793 wk->type = IEEE80211_WORK_AUTH;
1794 wk->probe_auth.tries = 0;
1795 return WORK_DONE_REQUEUE;
1796}
1797
2361int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, 1798int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
2362 struct cfg80211_auth_request *req) 1799 struct cfg80211_auth_request *req)
2363{ 1800{
2364 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2365 const u8 *ssid; 1801 const u8 *ssid;
2366 struct ieee80211_mgd_work *wk; 1802 struct ieee80211_work *wk;
2367 u16 auth_alg; 1803 u16 auth_alg;
2368 1804
2369 switch (req->auth_type) { 1805 switch (req->auth_type) {
@@ -2387,7 +1823,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
2387 if (!wk) 1823 if (!wk)
2388 return -ENOMEM; 1824 return -ENOMEM;
2389 1825
2390 wk->bss = (void *)req->bss; 1826 memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN);
2391 1827
2392 if (req->ie && req->ie_len) { 1828 if (req->ie && req->ie_len) {
2393 memcpy(wk->ie, req->ie, req->ie_len); 1829 memcpy(wk->ie, req->ie, req->ie_len);
@@ -2395,68 +1831,95 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
2395 } 1831 }
2396 1832
2397 if (req->key && req->key_len) { 1833 if (req->key && req->key_len) {
2398 wk->key_len = req->key_len; 1834 wk->probe_auth.key_len = req->key_len;
2399 wk->key_idx = req->key_idx; 1835 wk->probe_auth.key_idx = req->key_idx;
2400 memcpy(wk->key, req->key, req->key_len); 1836 memcpy(wk->probe_auth.key, req->key, req->key_len);
2401 } 1837 }
2402 1838
2403 ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID); 1839 ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
2404 memcpy(wk->ssid, ssid + 2, ssid[1]); 1840 memcpy(wk->probe_auth.ssid, ssid + 2, ssid[1]);
2405 wk->ssid_len = ssid[1]; 1841 wk->probe_auth.ssid_len = ssid[1];
2406
2407 wk->state = IEEE80211_MGD_STATE_PROBE;
2408 wk->auth_alg = auth_alg;
2409 wk->timeout = jiffies; /* run right away */
2410 1842
2411 /* 1843 wk->probe_auth.algorithm = auth_alg;
2412 * XXX: if still associated need to tell AP that we're going 1844 wk->probe_auth.privacy = req->bss->capability & WLAN_CAPABILITY_PRIVACY;
2413 * to sleep and then change channel etc.
2414 */
2415 sdata->local->oper_channel = req->bss->channel;
2416 ieee80211_hw_config(sdata->local, 0);
2417 1845
2418 mutex_lock(&ifmgd->mtx); 1846 /* if we already have a probe, don't probe again */
2419 list_add(&wk->list, &sdata->u.mgd.work_list); 1847 if (req->bss->proberesp_ies)
2420 mutex_unlock(&ifmgd->mtx); 1848 wk->type = IEEE80211_WORK_AUTH;
1849 else
1850 wk->type = IEEE80211_WORK_DIRECT_PROBE;
1851 wk->chan = req->bss->channel;
1852 wk->sdata = sdata;
1853 wk->done = ieee80211_probe_auth_done;
2421 1854
2422 ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.work); 1855 ieee80211_add_work(wk);
2423 return 0; 1856 return 0;
2424} 1857}
2425 1858
2426int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, 1859static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
2427 struct cfg80211_assoc_request *req) 1860 struct sk_buff *skb)
2428{ 1861{
2429 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1862 struct ieee80211_mgmt *mgmt;
2430 struct ieee80211_mgd_work *wk, *found = NULL; 1863 u16 status;
2431 int i, err;
2432 1864
2433 mutex_lock(&ifmgd->mtx); 1865 if (!skb) {
1866 cfg80211_send_assoc_timeout(wk->sdata->dev, wk->filter_ta);
1867 return WORK_DONE_DESTROY;
1868 }
2434 1869
2435 list_for_each_entry(wk, &ifmgd->work_list, list) { 1870 mgmt = (void *)skb->data;
2436 if (&wk->bss->cbss == req->bss && 1871 status = le16_to_cpu(mgmt->u.assoc_resp.status_code);
2437 wk->state == IEEE80211_MGD_STATE_IDLE) { 1872
2438 found = wk; 1873 if (status == WLAN_STATUS_SUCCESS) {
2439 break; 1874 mutex_lock(&wk->sdata->u.mgd.mtx);
1875 if (!ieee80211_assoc_success(wk, mgmt, skb->len)) {
1876 mutex_unlock(&wk->sdata->u.mgd.mtx);
1877 /* oops -- internal error -- send timeout for now */
1878 cfg80211_send_assoc_timeout(wk->sdata->dev,
1879 wk->filter_ta);
1880 return WORK_DONE_DESTROY;
2440 } 1881 }
1882 mutex_unlock(&wk->sdata->u.mgd.mtx);
2441 } 1883 }
2442 1884
2443 if (!found) { 1885 cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len);
2444 err = -ENOLINK; 1886 return WORK_DONE_DESTROY;
2445 goto out; 1887}
2446 }
2447 1888
2448 list_del(&found->list); 1889int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
1890 struct cfg80211_assoc_request *req)
1891{
1892 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1893 struct ieee80211_bss *bss = (void *)req->bss->priv;
1894 struct ieee80211_work *wk;
1895 const u8 *ssid;
1896 int i;
2449 1897
2450 wk = krealloc(found, sizeof(*wk) + req->ie_len, GFP_KERNEL); 1898 mutex_lock(&ifmgd->mtx);
2451 if (!wk) { 1899 if (ifmgd->associated) {
2452 list_add(&found->list, &ifmgd->work_list); 1900 if (!req->prev_bssid ||
2453 err = -ENOMEM; 1901 memcmp(req->prev_bssid, ifmgd->associated->bssid,
2454 goto out; 1902 ETH_ALEN)) {
1903 /*
1904 * We are already associated and the request was not a
1905 * reassociation request from the current BSS, so
1906 * reject it.
1907 */
1908 mutex_unlock(&ifmgd->mtx);
1909 return -EALREADY;
1910 }
1911
1912 /* Trying to reassociate - clear previous association state */
1913 ieee80211_set_disassoc(sdata);
2455 } 1914 }
1915 mutex_unlock(&ifmgd->mtx);
2456 1916
2457 list_add(&wk->list, &ifmgd->work_list); 1917 wk = kzalloc(sizeof(*wk) + req->ie_len, GFP_KERNEL);
1918 if (!wk)
1919 return -ENOMEM;
2458 1920
2459 ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N; 1921 ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
1922 ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
2460 1923
2461 for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) 1924 for (i = 0; i < req->crypto.n_ciphers_pairwise; i++)
2462 if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 || 1925 if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
@@ -2464,8 +1927,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2464 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) 1927 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104)
2465 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 1928 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
2466 1929
2467 sdata->local->oper_channel = req->bss->channel;
2468 ieee80211_hw_config(sdata->local, 0);
2469 1930
2470 if (req->ie && req->ie_len) { 1931 if (req->ie && req->ie_len) {
2471 memcpy(wk->ie, req->ie, req->ie_len); 1932 memcpy(wk->ie, req->ie, req->ie_len);
@@ -2473,12 +1934,55 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2473 } else 1934 } else
2474 wk->ie_len = 0; 1935 wk->ie_len = 0;
2475 1936
1937 wk->assoc.bss = req->bss;
1938
1939 memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN);
1940
1941 /* new association always uses requested smps mode */
1942 if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) {
1943 if (ifmgd->powersave)
1944 ifmgd->ap_smps = IEEE80211_SMPS_DYNAMIC;
1945 else
1946 ifmgd->ap_smps = IEEE80211_SMPS_OFF;
1947 } else
1948 ifmgd->ap_smps = ifmgd->req_smps;
1949
1950 wk->assoc.smps = ifmgd->ap_smps;
1951 /*
1952 * IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode.
1953 * We still associate in non-HT mode (11a/b/g) if any one of these
1954 * ciphers is configured as pairwise.
1955 * We can set this to true for non-11n hardware, that'll be checked
1956 * separately along with the peer capabilities.
1957 */
1958 wk->assoc.use_11n = !(ifmgd->flags & IEEE80211_STA_DISABLE_11N);
1959 wk->assoc.capability = req->bss->capability;
1960 wk->assoc.wmm_used = bss->wmm_used;
1961 wk->assoc.supp_rates = bss->supp_rates;
1962 wk->assoc.supp_rates_len = bss->supp_rates_len;
1963 wk->assoc.ht_information_ie =
1964 ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_INFORMATION);
1965
1966 if (bss->wmm_used && bss->uapsd_supported &&
1967 (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
1968 wk->assoc.uapsd_used = true;
1969 ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
1970 } else {
1971 wk->assoc.uapsd_used = false;
1972 ifmgd->flags &= ~IEEE80211_STA_UAPSD_ENABLED;
1973 }
1974
1975 ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
1976 memcpy(wk->assoc.ssid, ssid + 2, ssid[1]);
1977 wk->assoc.ssid_len = ssid[1];
1978
2476 if (req->prev_bssid) 1979 if (req->prev_bssid)
2477 memcpy(wk->prev_bssid, req->prev_bssid, ETH_ALEN); 1980 memcpy(wk->assoc.prev_bssid, req->prev_bssid, ETH_ALEN);
2478 1981
2479 wk->state = IEEE80211_MGD_STATE_ASSOC; 1982 wk->type = IEEE80211_WORK_ASSOC;
2480 wk->tries = 0; 1983 wk->chan = req->bss->channel;
2481 wk->timeout = jiffies; /* run right away */ 1984 wk->sdata = sdata;
1985 wk->done = ieee80211_assoc_done;
2482 1986
2483 if (req->use_mfp) { 1987 if (req->use_mfp) {
2484 ifmgd->mfp = IEEE80211_MFP_REQUIRED; 1988 ifmgd->mfp = IEEE80211_MFP_REQUIRED;
@@ -2493,69 +1997,65 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2493 else 1997 else
2494 ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT; 1998 ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT;
2495 1999
2496 ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.work); 2000 ieee80211_add_work(wk);
2497 2001 return 0;
2498 err = 0;
2499
2500 out:
2501 mutex_unlock(&ifmgd->mtx);
2502 return err;
2503} 2002}
2504 2003
2505int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, 2004int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2506 struct cfg80211_deauth_request *req, 2005 struct cfg80211_deauth_request *req,
2507 void *cookie) 2006 void *cookie)
2508{ 2007{
2008 struct ieee80211_local *local = sdata->local;
2509 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2009 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2510 struct ieee80211_mgd_work *wk; 2010 struct ieee80211_work *wk;
2511 const u8 *bssid = NULL; 2011 const u8 *bssid = req->bss->bssid;
2512 bool not_auth_yet = false;
2513 2012
2514 mutex_lock(&ifmgd->mtx); 2013 mutex_lock(&ifmgd->mtx);
2515 2014
2516 if (ifmgd->associated && &ifmgd->associated->cbss == req->bss) { 2015 if (ifmgd->associated == req->bss) {
2517 bssid = req->bss->bssid; 2016 bssid = req->bss->bssid;
2518 ieee80211_set_disassoc(sdata, true); 2017 ieee80211_set_disassoc(sdata);
2519 } else list_for_each_entry(wk, &ifmgd->work_list, list) {
2520 if (&wk->bss->cbss == req->bss) {
2521 bssid = req->bss->bssid;
2522 if (wk->state == IEEE80211_MGD_STATE_PROBE)
2523 not_auth_yet = true;
2524 list_del(&wk->list);
2525 kfree(wk);
2526 break;
2527 }
2528 }
2529
2530 /*
2531 * If somebody requests authentication and we haven't
2532 * sent out an auth frame yet there's no need to send
2533 * out a deauth frame either. If the state was PROBE,
2534 * then this is the case. If it's AUTH we have sent a
2535 * frame, and if it's IDLE we have completed the auth
2536 * process already.
2537 */
2538 if (not_auth_yet) {
2539 mutex_unlock(&ifmgd->mtx); 2018 mutex_unlock(&ifmgd->mtx);
2540 __cfg80211_auth_canceled(sdata->dev, bssid); 2019 } else {
2541 return 0; 2020 bool not_auth_yet = false;
2542 }
2543 2021
2544 /*
2545 * cfg80211 should catch this ... but it's racy since
2546 * we can receive a deauth frame, process it, hand it
2547 * to cfg80211 while that's in a locked section already
2548 * trying to tell us that the user wants to disconnect.
2549 */
2550 if (!bssid) {
2551 mutex_unlock(&ifmgd->mtx); 2022 mutex_unlock(&ifmgd->mtx);
2552 return -ENOLINK;
2553 }
2554 2023
2555 mutex_unlock(&ifmgd->mtx); 2024 mutex_lock(&local->work_mtx);
2025 list_for_each_entry(wk, &local->work_list, list) {
2026 if (wk->sdata != sdata)
2027 continue;
2028
2029 if (wk->type != IEEE80211_WORK_DIRECT_PROBE &&
2030 wk->type != IEEE80211_WORK_AUTH)
2031 continue;
2032
2033 if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN))
2034 continue;
2035
2036 not_auth_yet = wk->type == IEEE80211_WORK_DIRECT_PROBE;
2037 list_del_rcu(&wk->list);
2038 free_work(wk);
2039 break;
2040 }
2041 mutex_unlock(&local->work_mtx);
2042
2043 /*
2044 * If somebody requests authentication and we haven't
2045 * sent out an auth frame yet there's no need to send
2046 * out a deauth frame either. If the state was PROBE,
2047 * then this is the case. If it's AUTH we have sent a
2048 * frame, and if it's IDLE we have completed the auth
2049 * process already.
2050 */
2051 if (not_auth_yet) {
2052 __cfg80211_auth_canceled(sdata->dev, bssid);
2053 return 0;
2054 }
2055 }
2556 2056
2557 printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n", 2057 printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n",
2558 sdata->dev->name, bssid, req->reason_code); 2058 sdata->name, bssid, req->reason_code);
2559 2059
2560 ieee80211_send_deauth_disassoc(sdata, bssid, 2060 ieee80211_send_deauth_disassoc(sdata, bssid,
2561 IEEE80211_STYPE_DEAUTH, req->reason_code, 2061 IEEE80211_STYPE_DEAUTH, req->reason_code,
@@ -2580,15 +2080,15 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2580 * to cfg80211 while that's in a locked section already 2080 * to cfg80211 while that's in a locked section already
2581 * trying to tell us that the user wants to disconnect. 2081 * trying to tell us that the user wants to disconnect.
2582 */ 2082 */
2583 if (&ifmgd->associated->cbss != req->bss) { 2083 if (ifmgd->associated != req->bss) {
2584 mutex_unlock(&ifmgd->mtx); 2084 mutex_unlock(&ifmgd->mtx);
2585 return -ENOLINK; 2085 return -ENOLINK;
2586 } 2086 }
2587 2087
2588 printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n", 2088 printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n",
2589 sdata->dev->name, req->bss->bssid, req->reason_code); 2089 sdata->name, req->bss->bssid, req->reason_code);
2590 2090
2591 ieee80211_set_disassoc(sdata, false); 2091 ieee80211_set_disassoc(sdata);
2592 2092
2593 mutex_unlock(&ifmgd->mtx); 2093 mutex_unlock(&ifmgd->mtx);
2594 2094
@@ -2600,3 +2100,38 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2600 2100
2601 return 0; 2101 return 0;
2602} 2102}
2103
2104int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata,
2105 struct ieee80211_channel *chan,
2106 enum nl80211_channel_type channel_type,
2107 const u8 *buf, size_t len, u64 *cookie)
2108{
2109 struct ieee80211_local *local = sdata->local;
2110 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2111 struct sk_buff *skb;
2112
2113 /* Check that we are on the requested channel for transmission */
2114 if ((chan != local->tmp_channel ||
2115 channel_type != local->tmp_channel_type) &&
2116 (chan != local->oper_channel ||
2117 channel_type != local->oper_channel_type))
2118 return -EBUSY;
2119
2120 skb = dev_alloc_skb(local->hw.extra_tx_headroom + len);
2121 if (!skb)
2122 return -ENOMEM;
2123 skb_reserve(skb, local->hw.extra_tx_headroom);
2124
2125 memcpy(skb_put(skb, len), buf, len);
2126
2127 if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
2128 IEEE80211_SKB_CB(skb)->flags |=
2129 IEEE80211_TX_INTFL_DONT_ENCRYPT;
2130 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_NL80211_FRAME_TX |
2131 IEEE80211_TX_CTL_REQ_TX_STATUS;
2132 skb->dev = sdata->dev;
2133 ieee80211_tx_skb(sdata, skb);
2134
2135 *cookie = (unsigned long) skb;
2136 return 0;
2137}
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
new file mode 100644
index 000000000000..c36b1911987a
--- /dev/null
+++ b/net/mac80211/offchannel.c
@@ -0,0 +1,170 @@
1/*
2 * Off-channel operation helpers
3 *
4 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
5 * Copyright 2004, Instant802 Networks, Inc.
6 * Copyright 2005, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15#include <net/mac80211.h>
16#include "ieee80211_i.h"
17
18/*
19 * inform AP that we will go to sleep so that it will buffer the frames
20 * while we scan
21 */
22static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
23{
24 struct ieee80211_local *local = sdata->local;
25
26 local->offchannel_ps_enabled = false;
27
28 /* FIXME: what to do when local->pspolling is true? */
29
30 del_timer_sync(&local->dynamic_ps_timer);
31 cancel_work_sync(&local->dynamic_ps_enable_work);
32
33 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
34 local->offchannel_ps_enabled = true;
35 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
36 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
37 }
38
39 if (!(local->offchannel_ps_enabled) ||
40 !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
41 /*
42 * If power save was enabled, no need to send a nullfunc
43 * frame because AP knows that we are sleeping. But if the
44 * hardware is creating the nullfunc frame for power save
45 * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not
46 * enabled) and power save was enabled, the firmware just
47 * sent a null frame with power save disabled. So we need
48 * to send a new nullfunc frame to inform the AP that we
49 * are again sleeping.
50 */
51 ieee80211_send_nullfunc(local, sdata, 1);
52}
53
54/* inform AP that we are awake again, unless power save is enabled */
55static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
56{
57 struct ieee80211_local *local = sdata->local;
58
59 if (!local->ps_sdata)
60 ieee80211_send_nullfunc(local, sdata, 0);
61 else if (local->offchannel_ps_enabled) {
62 /*
63 * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
64 * will send a nullfunc frame with the powersave bit set
65 * even though the AP already knows that we are sleeping.
66 * This could be avoided by sending a null frame with power
67 * save bit disabled before enabling the power save, but
68 * this doesn't gain anything.
69 *
70 * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need
71 * to send a nullfunc frame because AP already knows that
72 * we are sleeping, let's just enable power save mode in
73 * hardware.
74 */
75 local->hw.conf.flags |= IEEE80211_CONF_PS;
76 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
77 } else if (local->hw.conf.dynamic_ps_timeout > 0) {
78 /*
79 * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
80 * had been running before leaving the operating channel,
81 * restart the timer now and send a nullfunc frame to inform
82 * the AP that we are awake.
83 */
84 ieee80211_send_nullfunc(local, sdata, 0);
85 mod_timer(&local->dynamic_ps_timer, jiffies +
86 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
87 }
88}
89
90void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
91{
92 struct ieee80211_sub_if_data *sdata;
93
94 mutex_lock(&local->iflist_mtx);
95 list_for_each_entry(sdata, &local->interfaces, list) {
96 if (!ieee80211_sdata_running(sdata))
97 continue;
98
99 /* disable beaconing */
100 if (sdata->vif.type == NL80211_IFTYPE_AP ||
101 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
102 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
103 ieee80211_bss_info_change_notify(
104 sdata, BSS_CHANGED_BEACON_ENABLED);
105
106 /*
107 * only handle non-STA interfaces here, STA interfaces
108 * are handled in ieee80211_offchannel_stop_station(),
109 * e.g., from the background scan state machine.
110 *
111 * In addition, do not stop monitor interface to allow it to be
112 * used from user space controlled off-channel operations.
113 */
114 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
115 sdata->vif.type != NL80211_IFTYPE_MONITOR)
116 netif_tx_stop_all_queues(sdata->dev);
117 }
118 mutex_unlock(&local->iflist_mtx);
119}
120
121void ieee80211_offchannel_stop_station(struct ieee80211_local *local)
122{
123 struct ieee80211_sub_if_data *sdata;
124
125 /*
126 * notify the AP about us leaving the channel and stop all STA interfaces
127 */
128 mutex_lock(&local->iflist_mtx);
129 list_for_each_entry(sdata, &local->interfaces, list) {
130 if (!ieee80211_sdata_running(sdata))
131 continue;
132
133 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
134 netif_tx_stop_all_queues(sdata->dev);
135 if (sdata->u.mgd.associated)
136 ieee80211_offchannel_ps_enable(sdata);
137 }
138 }
139 mutex_unlock(&local->iflist_mtx);
140}
141
142void ieee80211_offchannel_return(struct ieee80211_local *local,
143 bool enable_beaconing)
144{
145 struct ieee80211_sub_if_data *sdata;
146
147 mutex_lock(&local->iflist_mtx);
148 list_for_each_entry(sdata, &local->interfaces, list) {
149 if (!ieee80211_sdata_running(sdata))
150 continue;
151
152 /* Tell AP we're back */
153 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
154 if (sdata->u.mgd.associated)
155 ieee80211_offchannel_ps_disable(sdata);
156 }
157
158 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
159 netif_tx_wake_all_queues(sdata->dev);
160
161 /* re-enable beaconing */
162 if (enable_beaconing &&
163 (sdata->vif.type == NL80211_IFTYPE_AP ||
164 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
165 sdata->vif.type == NL80211_IFTYPE_MESH_POINT))
166 ieee80211_bss_info_change_notify(
167 sdata, BSS_CHANGED_BEACON_ENABLED);
168 }
169 mutex_unlock(&local->iflist_mtx);
170}
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index e535f1c988fe..0e64484e861c 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -10,9 +10,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
10{ 10{
11 struct ieee80211_local *local = hw_to_local(hw); 11 struct ieee80211_local *local = hw_to_local(hw);
12 struct ieee80211_sub_if_data *sdata; 12 struct ieee80211_sub_if_data *sdata;
13 struct ieee80211_if_init_conf conf;
14 struct sta_info *sta; 13 struct sta_info *sta;
15 unsigned long flags;
16 14
17 ieee80211_scan_cancel(local); 15 ieee80211_scan_cancel(local);
18 16
@@ -56,22 +54,21 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
56 rcu_read_unlock(); 54 rcu_read_unlock();
57 55
58 /* remove STAs */ 56 /* remove STAs */
59 spin_lock_irqsave(&local->sta_lock, flags); 57 mutex_lock(&local->sta_mtx);
60 list_for_each_entry(sta, &local->sta_list, list) { 58 list_for_each_entry(sta, &local->sta_list, list) {
61 if (local->ops->sta_notify) { 59 if (sta->uploaded) {
62 sdata = sta->sdata; 60 sdata = sta->sdata;
63 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 61 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
64 sdata = container_of(sdata->bss, 62 sdata = container_of(sdata->bss,
65 struct ieee80211_sub_if_data, 63 struct ieee80211_sub_if_data,
66 u.ap); 64 u.ap);
67 65
68 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_REMOVE, 66 drv_sta_remove(local, sdata, &sta->sta);
69 &sta->sta);
70 } 67 }
71 68
72 mesh_plink_quiesce(sta); 69 mesh_plink_quiesce(sta);
73 } 70 }
74 spin_unlock_irqrestore(&local->sta_lock, flags); 71 mutex_unlock(&local->sta_mtx);
75 72
76 /* remove all interfaces */ 73 /* remove all interfaces */
77 list_for_each_entry(sdata, &local->interfaces, list) { 74 list_for_each_entry(sdata, &local->interfaces, list) {
@@ -93,17 +90,14 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
93 break; 90 break;
94 } 91 }
95 92
96 if (!netif_running(sdata->dev)) 93 if (!ieee80211_sdata_running(sdata))
97 continue; 94 continue;
98 95
99 /* disable beaconing */ 96 /* disable beaconing */
100 ieee80211_bss_info_change_notify(sdata, 97 ieee80211_bss_info_change_notify(sdata,
101 BSS_CHANGED_BEACON_ENABLED); 98 BSS_CHANGED_BEACON_ENABLED);
102 99
103 conf.vif = &sdata->vif; 100 drv_remove_interface(local, &sdata->vif);
104 conf.type = sdata->vif.type;
105 conf.mac_addr = sdata->dev->dev_addr;
106 drv_remove_interface(local, &conf);
107 } 101 }
108 102
109 /* stop hardware - this must stop RX */ 103 /* stop hardware - this must stop RX */
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index b9007f80cb92..0b299d236fa1 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -145,7 +145,7 @@ static const struct file_operations rcname_ops = {
145}; 145};
146#endif 146#endif
147 147
148struct rate_control_ref *rate_control_alloc(const char *name, 148static struct rate_control_ref *rate_control_alloc(const char *name,
149 struct ieee80211_local *local) 149 struct ieee80211_local *local)
150{ 150{
151 struct dentry *debugfsdir = NULL; 151 struct dentry *debugfsdir = NULL;
@@ -207,6 +207,27 @@ static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc)
207 return ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc)); 207 return ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc));
208} 208}
209 209
210static void rc_send_low_broadcast(s8 *idx, u32 basic_rates, u8 max_rate_idx)
211{
212 u8 i;
213
214 if (basic_rates == 0)
215 return; /* assume basic rates unknown and accept rate */
216 if (*idx < 0)
217 return;
218 if (basic_rates & (1 << *idx))
219 return; /* selected rate is a basic rate */
220
221 for (i = *idx + 1; i <= max_rate_idx; i++) {
222 if (basic_rates & (1 << i)) {
223 *idx = i;
224 return;
225 }
226 }
227
228 /* could not find a basic rate; use original selection */
229}
230
210bool rate_control_send_low(struct ieee80211_sta *sta, 231bool rate_control_send_low(struct ieee80211_sta *sta,
211 void *priv_sta, 232 void *priv_sta,
212 struct ieee80211_tx_rate_control *txrc) 233 struct ieee80211_tx_rate_control *txrc)
@@ -218,12 +239,48 @@ bool rate_control_send_low(struct ieee80211_sta *sta,
218 info->control.rates[0].count = 239 info->control.rates[0].count =
219 (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 240 (info->flags & IEEE80211_TX_CTL_NO_ACK) ?
220 1 : txrc->hw->max_rate_tries; 241 1 : txrc->hw->max_rate_tries;
242 if (!sta && txrc->ap)
243 rc_send_low_broadcast(&info->control.rates[0].idx,
244 txrc->bss_conf->basic_rates,
245 txrc->sband->n_bitrates);
221 return true; 246 return true;
222 } 247 }
223 return false; 248 return false;
224} 249}
225EXPORT_SYMBOL(rate_control_send_low); 250EXPORT_SYMBOL(rate_control_send_low);
226 251
252static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
253 int n_bitrates, u32 mask)
254{
255 int j;
256
257 /* See whether the selected rate or anything below it is allowed. */
258 for (j = rate->idx; j >= 0; j--) {
259 if (mask & (1 << j)) {
260 /* Okay, found a suitable rate. Use it. */
261 rate->idx = j;
262 return;
263 }
264 }
265
266 /* Try to find a higher rate that would be allowed */
267 for (j = rate->idx + 1; j < n_bitrates; j++) {
268 if (mask & (1 << j)) {
269 /* Okay, found a suitable rate. Use it. */
270 rate->idx = j;
271 return;
272 }
273 }
274
275 /*
276 * Uh.. No suitable rate exists. This should not really happen with
277 * sane TX rate mask configurations. However, should someone manage to
278 * configure supported rates and TX rate mask in incompatible way,
279 * allow the frame to be transmitted with whatever the rate control
280 * selected.
281 */
282}
283
227void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, 284void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
228 struct sta_info *sta, 285 struct sta_info *sta,
229 struct ieee80211_tx_rate_control *txrc) 286 struct ieee80211_tx_rate_control *txrc)
@@ -233,6 +290,7 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
233 struct ieee80211_sta *ista = NULL; 290 struct ieee80211_sta *ista = NULL;
234 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); 291 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
235 int i; 292 int i;
293 u32 mask;
236 294
237 if (sta) { 295 if (sta) {
238 ista = &sta->sta; 296 ista = &sta->sta;
@@ -245,23 +303,34 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
245 info->control.rates[i].count = 1; 303 info->control.rates[i].count = 1;
246 } 304 }
247 305
248 if (sta && sdata->force_unicast_rateidx > -1) { 306 if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
249 info->control.rates[0].idx = sdata->force_unicast_rateidx; 307 return;
250 } else { 308
251 ref->ops->get_rate(ref->priv, ista, priv_sta, txrc); 309 ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
252 info->flags |= IEEE80211_TX_INTFL_RCALGO;
253 }
254 310
255 /* 311 /*
256 * try to enforce the maximum rate the user wanted 312 * Try to enforce the rateidx mask the user wanted. skip this if the
313 * default mask (allow all rates) is used to save some processing for
314 * the common case.
257 */ 315 */
258 if (sdata->max_ratectrl_rateidx > -1) 316 mask = sdata->rc_rateidx_mask[info->band];
317 if (mask != (1 << txrc->sband->n_bitrates) - 1) {
318 if (sta) {
319 /* Filter out rates that the STA does not support */
320 mask &= sta->sta.supp_rates[info->band];
321 }
322 /*
323 * Make sure the rate index selected for each TX rate is
324 * included in the configured mask and change the rate indexes
325 * if needed.
326 */
259 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { 327 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
328 /* Rate masking supports only legacy rates for now */
260 if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS) 329 if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS)
261 continue; 330 continue;
262 info->control.rates[i].idx = 331 rate_idx_match_mask(&info->control.rates[i],
263 min_t(s8, info->control.rates[i].idx, 332 txrc->sband->n_bitrates, mask);
264 sdata->max_ratectrl_rateidx); 333 }
265 } 334 }
266 335
267 BUG_ON(info->control.rates[0].idx < 0); 336 BUG_ON(info->control.rates[0].idx < 0);
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index cb9bd1f65e27..065a96190e32 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -26,10 +26,6 @@ struct rate_control_ref {
26 struct kref kref; 26 struct kref kref;
27}; 27};
28 28
29/* Get a reference to the rate control algorithm. If `name' is NULL, get the
30 * first available algorithm. */
31struct rate_control_ref *rate_control_alloc(const char *name,
32 struct ieee80211_local *local);
33void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, 29void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
34 struct sta_info *sta, 30 struct sta_info *sta,
35 struct ieee80211_tx_rate_control *txrc); 31 struct ieee80211_tx_rate_control *txrc);
@@ -44,10 +40,11 @@ static inline void rate_control_tx_status(struct ieee80211_local *local,
44 struct rate_control_ref *ref = local->rate_ctrl; 40 struct rate_control_ref *ref = local->rate_ctrl;
45 struct ieee80211_sta *ista = &sta->sta; 41 struct ieee80211_sta *ista = &sta->sta;
46 void *priv_sta = sta->rate_ctrl_priv; 42 void *priv_sta = sta->rate_ctrl_priv;
47 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
48 43
49 if (likely(info->flags & IEEE80211_TX_INTFL_RCALGO)) 44 if (!ref)
50 ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb); 45 return;
46
47 ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
51} 48}
52 49
53 50
@@ -69,7 +66,8 @@ static inline void rate_control_rate_init(struct sta_info *sta)
69 66
70static inline void rate_control_rate_update(struct ieee80211_local *local, 67static inline void rate_control_rate_update(struct ieee80211_local *local,
71 struct ieee80211_supported_band *sband, 68 struct ieee80211_supported_band *sband,
72 struct sta_info *sta, u32 changed) 69 struct sta_info *sta, u32 changed,
70 enum nl80211_channel_type oper_chan_type)
73{ 71{
74 struct rate_control_ref *ref = local->rate_ctrl; 72 struct rate_control_ref *ref = local->rate_ctrl;
75 struct ieee80211_sta *ista = &sta->sta; 73 struct ieee80211_sta *ista = &sta->sta;
@@ -77,7 +75,7 @@ static inline void rate_control_rate_update(struct ieee80211_local *local,
77 75
78 if (ref && ref->ops->rate_update) 76 if (ref && ref->ops->rate_update)
79 ref->ops->rate_update(ref->priv, sband, ista, 77 ref->ops->rate_update(ref->priv, sband, ista,
80 priv_sta, changed); 78 priv_sta, changed, oper_chan_type);
81} 79}
82 80
83static inline void *rate_control_alloc_sta(struct rate_control_ref *ref, 81static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
@@ -115,7 +113,8 @@ static inline void rate_control_remove_sta_debugfs(struct sta_info *sta)
115#endif 113#endif
116} 114}
117 115
118/* functions for rate control related to a device */ 116/* Get a reference to the rate control algorithm. If `name' is NULL, get the
117 * first available algorithm. */
119int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, 118int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
120 const char *name); 119 const char *name);
121void rate_control_deinitialize(struct ieee80211_local *local); 120void rate_control_deinitialize(struct ieee80211_local *local);
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index 699d3ed869c4..2652a374974e 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -157,9 +157,7 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
157 157
158 /* In case nothing happened during the previous control interval, turn 158 /* In case nothing happened during the previous control interval, turn
159 * the sharpening factor on. */ 159 * the sharpening factor on. */
160 period = (HZ * pinfo->sampling_period + 500) / 1000; 160 period = msecs_to_jiffies(pinfo->sampling_period);
161 if (!period)
162 period = 1;
163 if (jiffies - spinfo->last_sample > 2 * period) 161 if (jiffies - spinfo->last_sample > 2 * period)
164 spinfo->sharp_cnt = pinfo->sharpen_duration; 162 spinfo->sharp_cnt = pinfo->sharpen_duration;
165 163
@@ -190,7 +188,7 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
190 rate_control_pid_normalize(pinfo, sband->n_bitrates); 188 rate_control_pid_normalize(pinfo, sband->n_bitrates);
191 189
192 /* Compute the proportional, integral and derivative errors. */ 190 /* Compute the proportional, integral and derivative errors. */
193 err_prop = (pinfo->target << RC_PID_ARITH_SHIFT) - pf; 191 err_prop = (pinfo->target - pf) << RC_PID_ARITH_SHIFT;
194 192
195 err_avg = spinfo->err_avg_sc >> pinfo->smoothing_shift; 193 err_avg = spinfo->err_avg_sc >> pinfo->smoothing_shift;
196 spinfo->err_avg_sc = spinfo->err_avg_sc - err_avg + err_prop; 194 spinfo->err_avg_sc = spinfo->err_avg_sc - err_avg + err_prop;
@@ -252,9 +250,7 @@ static void rate_control_pid_tx_status(void *priv, struct ieee80211_supported_ba
252 } 250 }
253 251
254 /* Update PID controller state. */ 252 /* Update PID controller state. */
255 period = (HZ * pinfo->sampling_period + 500) / 1000; 253 period = msecs_to_jiffies(pinfo->sampling_period);
256 if (!period)
257 period = 1;
258 if (time_after(jiffies, spinfo->last_sample + period)) 254 if (time_after(jiffies, spinfo->last_sample + period))
259 rate_control_pid_sample(pinfo, sband, sta, spinfo); 255 rate_control_pid_sample(pinfo, sband, sta, spinfo);
260} 256}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 9f2807aeaf52..b5c48de81d8b 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2,7 +2,7 @@
2 * Copyright 2002-2005, Instant802 Networks, Inc. 2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc. 3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> 5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -283,15 +283,15 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
283 skb->protocol = htons(ETH_P_802_2); 283 skb->protocol = htons(ETH_P_802_2);
284 284
285 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 285 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
286 if (!netif_running(sdata->dev))
287 continue;
288
289 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) 286 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
290 continue; 287 continue;
291 288
292 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) 289 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
293 continue; 290 continue;
294 291
292 if (!ieee80211_sdata_running(sdata))
293 continue;
294
295 if (prev_dev) { 295 if (prev_dev) {
296 skb2 = skb_clone(skb, GFP_ATOMIC); 296 skb2 = skb_clone(skb, GFP_ATOMIC);
297 if (skb2) { 297 if (skb2) {
@@ -361,7 +361,9 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
361 * boundary. In the case of regular frames, this simply means aligning the 361 * boundary. In the case of regular frames, this simply means aligning the
362 * payload to a four-byte boundary (because either the IP header is directly 362 * payload to a four-byte boundary (because either the IP header is directly
363 * contained, or IV/RFC1042 headers that have a length divisible by four are 363 * contained, or IV/RFC1042 headers that have a length divisible by four are
364 * in front of it). 364 * in front of it). If the payload data is not properly aligned and the
365 * architecture doesn't support efficient unaligned operations, mac80211
366 * will align the data.
365 * 367 *
366 * With A-MSDU frames, however, the payload data address must yield two modulo 368 * With A-MSDU frames, however, the payload data address must yield two modulo
367 * four because there are 14-byte 802.3 headers within the A-MSDU frames that 369 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
@@ -375,25 +377,10 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
375 */ 377 */
376static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx) 378static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
377{ 379{
378 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 380#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
379 int hdrlen; 381 WARN_ONCE((unsigned long)rx->skb->data & 1,
380 382 "unaligned packet at 0x%p\n", rx->skb->data);
381#ifndef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
382 return;
383#endif 383#endif
384
385 if (WARN_ONCE((unsigned long)rx->skb->data & 1,
386 "unaligned packet at 0x%p\n", rx->skb->data))
387 return;
388
389 if (!ieee80211_is_data_present(hdr->frame_control))
390 return;
391
392 hdrlen = ieee80211_hdrlen(hdr->frame_control);
393 if (rx->flags & IEEE80211_RX_AMSDU)
394 hdrlen += ETH_HLEN;
395 WARN_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3,
396 "unaligned IP payload at 0x%p\n", rx->skb->data + hdrlen);
397} 384}
398 385
399 386
@@ -476,7 +463,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
476{ 463{
477 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 464 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
478 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); 465 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
479 char *dev_addr = rx->sdata->dev->dev_addr; 466 char *dev_addr = rx->sdata->vif.addr;
480 467
481 if (ieee80211_is_data(hdr->frame_control)) { 468 if (ieee80211_is_data(hdr->frame_control)) {
482 if (is_multicast_ether_addr(hdr->addr1)) { 469 if (is_multicast_ether_addr(hdr->addr1)) {
@@ -1021,10 +1008,10 @@ static void ap_sta_ps_start(struct sta_info *sta)
1021 1008
1022 atomic_inc(&sdata->bss->num_sta_ps); 1009 atomic_inc(&sdata->bss->num_sta_ps);
1023 set_sta_flags(sta, WLAN_STA_PS_STA); 1010 set_sta_flags(sta, WLAN_STA_PS_STA);
1024 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta); 1011 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1025#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1012#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1026 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n", 1013 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
1027 sdata->dev->name, sta->sta.addr, sta->sta.aid); 1014 sdata->name, sta->sta.addr, sta->sta.aid);
1028#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1015#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1029} 1016}
1030 1017
@@ -1038,13 +1025,13 @@ static void ap_sta_ps_end(struct sta_info *sta)
1038 1025
1039#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1026#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1040 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n", 1027 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
1041 sdata->dev->name, sta->sta.addr, sta->sta.aid); 1028 sdata->name, sta->sta.addr, sta->sta.aid);
1042#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1029#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1043 1030
1044 if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) { 1031 if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) {
1045#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1032#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1046 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n", 1033 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
1047 sdata->dev->name, sta->sta.addr, sta->sta.aid); 1034 sdata->name, sta->sta.addr, sta->sta.aid);
1048#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1035#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1049 return; 1036 return;
1050 } 1037 }
@@ -1124,6 +1111,18 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1124 if (ieee80211_is_nullfunc(hdr->frame_control) || 1111 if (ieee80211_is_nullfunc(hdr->frame_control) ||
1125 ieee80211_is_qos_nullfunc(hdr->frame_control)) { 1112 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1126 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); 1113 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1114
1115 /*
1116 * If we receive a 4-addr nullfunc frame from a STA
1117 * that was not moved to a 4-addr STA vlan yet, drop
1118 * the frame to the monitor interface, to make sure
1119 * that hostapd sees it
1120 */
1121 if (ieee80211_has_a4(hdr->frame_control) &&
1122 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1123 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1124 !rx->sdata->u.vlan.sta)))
1125 return RX_DROP_MONITOR;
1127 /* 1126 /*
1128 * Update counter and free packet here to avoid 1127 * Update counter and free packet here to avoid
1129 * counting this as a dropped packed. 1128 * counting this as a dropped packed.
@@ -1156,7 +1155,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1156 printk(KERN_DEBUG "%s: RX reassembly removed oldest " 1155 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
1157 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d " 1156 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
1158 "addr1=%pM addr2=%pM\n", 1157 "addr1=%pM addr2=%pM\n",
1159 sdata->dev->name, idx, 1158 sdata->name, idx,
1160 jiffies - entry->first_frag_time, entry->seq, 1159 jiffies - entry->first_frag_time, entry->seq,
1161 entry->last_frag, hdr->addr1, hdr->addr2); 1160 entry->last_frag, hdr->addr1, hdr->addr2);
1162#endif 1161#endif
@@ -1398,6 +1397,21 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1398 ieee80211_is_data(fc) && 1397 ieee80211_is_data(fc) &&
1399 (rx->key || rx->sdata->drop_unencrypted))) 1398 (rx->key || rx->sdata->drop_unencrypted)))
1400 return -EACCES; 1399 return -EACCES;
1400
1401 return 0;
1402}
1403
1404static int
1405ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1406{
1407 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1408 __le16 fc = hdr->frame_control;
1409 int res;
1410
1411 res = ieee80211_drop_unencrypted(rx, fc);
1412 if (unlikely(res))
1413 return res;
1414
1401 if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) { 1415 if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
1402 if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && 1416 if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
1403 rx->key)) 1417 rx->key))
@@ -1424,7 +1438,6 @@ static int
1424__ieee80211_data_to_8023(struct ieee80211_rx_data *rx) 1438__ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1425{ 1439{
1426 struct ieee80211_sub_if_data *sdata = rx->sdata; 1440 struct ieee80211_sub_if_data *sdata = rx->sdata;
1427 struct net_device *dev = sdata->dev;
1428 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1441 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1429 1442
1430 if (ieee80211_has_a4(hdr->frame_control) && 1443 if (ieee80211_has_a4(hdr->frame_control) &&
@@ -1436,7 +1449,7 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1436 (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr))) 1449 (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr)))
1437 return -1; 1450 return -1;
1438 1451
1439 return ieee80211_data_to_8023(rx->skb, dev->dev_addr, sdata->vif.type); 1452 return ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
1440} 1453}
1441 1454
1442/* 1455/*
@@ -1453,7 +1466,7 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1453 * of whether the frame was encrypted or not. 1466 * of whether the frame was encrypted or not.
1454 */ 1467 */
1455 if (ehdr->h_proto == htons(ETH_P_PAE) && 1468 if (ehdr->h_proto == htons(ETH_P_PAE) &&
1456 (compare_ether_addr(ehdr->h_dest, rx->sdata->dev->dev_addr) == 0 || 1469 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
1457 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0)) 1470 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1458 return true; 1471 return true;
1459 1472
@@ -1472,7 +1485,6 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1472{ 1485{
1473 struct ieee80211_sub_if_data *sdata = rx->sdata; 1486 struct ieee80211_sub_if_data *sdata = rx->sdata;
1474 struct net_device *dev = sdata->dev; 1487 struct net_device *dev = sdata->dev;
1475 struct ieee80211_local *local = rx->local;
1476 struct sk_buff *skb, *xmit_skb; 1488 struct sk_buff *skb, *xmit_skb;
1477 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 1489 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1478 struct sta_info *dsta; 1490 struct sta_info *dsta;
@@ -1495,8 +1507,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1495 printk(KERN_DEBUG "%s: failed to clone " 1507 printk(KERN_DEBUG "%s: failed to clone "
1496 "multicast frame\n", dev->name); 1508 "multicast frame\n", dev->name);
1497 } else { 1509 } else {
1498 dsta = sta_info_get(local, skb->data); 1510 dsta = sta_info_get(sdata, skb->data);
1499 if (dsta && dsta->sdata->dev == dev) { 1511 if (dsta) {
1500 /* 1512 /*
1501 * The destination station is associated to 1513 * The destination station is associated to
1502 * this AP (in this VLAN), so send the frame 1514 * this AP (in this VLAN), so send the frame
@@ -1512,7 +1524,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1512 if (skb) { 1524 if (skb) {
1513 int align __maybe_unused; 1525 int align __maybe_unused;
1514 1526
1515#if defined(CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT) || !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 1527#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1516 /* 1528 /*
1517 * 'align' will only take the values 0 or 2 here 1529 * 'align' will only take the values 0 or 2 here
1518 * since all frames are required to be aligned 1530 * since all frames are required to be aligned
@@ -1556,16 +1568,10 @@ static ieee80211_rx_result debug_noinline
1556ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) 1568ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1557{ 1569{
1558 struct net_device *dev = rx->sdata->dev; 1570 struct net_device *dev = rx->sdata->dev;
1559 struct ieee80211_local *local = rx->local; 1571 struct sk_buff *skb = rx->skb;
1560 u16 ethertype;
1561 u8 *payload;
1562 struct sk_buff *skb = rx->skb, *frame = NULL;
1563 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1572 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1564 __le16 fc = hdr->frame_control; 1573 __le16 fc = hdr->frame_control;
1565 const struct ethhdr *eth; 1574 struct sk_buff_head frame_list;
1566 int remaining, err;
1567 u8 dst[ETH_ALEN];
1568 u8 src[ETH_ALEN];
1569 1575
1570 if (unlikely(!ieee80211_is_data(fc))) 1576 if (unlikely(!ieee80211_is_data(fc)))
1571 return RX_CONTINUE; 1577 return RX_CONTINUE;
@@ -1576,94 +1582,34 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1576 if (!(rx->flags & IEEE80211_RX_AMSDU)) 1582 if (!(rx->flags & IEEE80211_RX_AMSDU))
1577 return RX_CONTINUE; 1583 return RX_CONTINUE;
1578 1584
1579 err = __ieee80211_data_to_8023(rx); 1585 if (ieee80211_has_a4(hdr->frame_control) &&
1580 if (unlikely(err)) 1586 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1587 !rx->sdata->u.vlan.sta)
1581 return RX_DROP_UNUSABLE; 1588 return RX_DROP_UNUSABLE;
1582 1589
1583 skb->dev = dev; 1590 if (is_multicast_ether_addr(hdr->addr1) &&
1584 1591 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1585 dev->stats.rx_packets++; 1592 rx->sdata->u.vlan.sta) ||
1586 dev->stats.rx_bytes += skb->len; 1593 (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1587 1594 rx->sdata->u.mgd.use_4addr)))
1588 /* skip the wrapping header */
1589 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
1590 if (!eth)
1591 return RX_DROP_UNUSABLE; 1595 return RX_DROP_UNUSABLE;
1592 1596
1593 while (skb != frame) { 1597 skb->dev = dev;
1594 u8 padding; 1598 __skb_queue_head_init(&frame_list);
1595 __be16 len = eth->h_proto;
1596 unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
1597
1598 remaining = skb->len;
1599 memcpy(dst, eth->h_dest, ETH_ALEN);
1600 memcpy(src, eth->h_source, ETH_ALEN);
1601
1602 padding = ((4 - subframe_len) & 0x3);
1603 /* the last MSDU has no padding */
1604 if (subframe_len > remaining)
1605 return RX_DROP_UNUSABLE;
1606 1599
1607 skb_pull(skb, sizeof(struct ethhdr)); 1600 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
1608 /* if last subframe reuse skb */ 1601 rx->sdata->vif.type,
1609 if (remaining <= subframe_len + padding) 1602 rx->local->hw.extra_tx_headroom);
1610 frame = skb;
1611 else {
1612 /*
1613 * Allocate and reserve two bytes more for payload
1614 * alignment since sizeof(struct ethhdr) is 14.
1615 */
1616 frame = dev_alloc_skb(
1617 ALIGN(local->hw.extra_tx_headroom, 4) +
1618 subframe_len + 2);
1619
1620 if (frame == NULL)
1621 return RX_DROP_UNUSABLE;
1622
1623 skb_reserve(frame,
1624 ALIGN(local->hw.extra_tx_headroom, 4) +
1625 sizeof(struct ethhdr) + 2);
1626 memcpy(skb_put(frame, ntohs(len)), skb->data,
1627 ntohs(len));
1628
1629 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
1630 padding);
1631 if (!eth) {
1632 dev_kfree_skb(frame);
1633 return RX_DROP_UNUSABLE;
1634 }
1635 }
1636 1603
1637 skb_reset_network_header(frame); 1604 while (!skb_queue_empty(&frame_list)) {
1638 frame->dev = dev; 1605 rx->skb = __skb_dequeue(&frame_list);
1639 frame->priority = skb->priority;
1640 rx->skb = frame;
1641
1642 payload = frame->data;
1643 ethertype = (payload[6] << 8) | payload[7];
1644
1645 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1646 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1647 compare_ether_addr(payload,
1648 bridge_tunnel_header) == 0)) {
1649 /* remove RFC1042 or Bridge-Tunnel
1650 * encapsulation and replace EtherType */
1651 skb_pull(frame, 6);
1652 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1653 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1654 } else {
1655 memcpy(skb_push(frame, sizeof(__be16)),
1656 &len, sizeof(__be16));
1657 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1658 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1659 }
1660 1606
1661 if (!ieee80211_frame_allowed(rx, fc)) { 1607 if (!ieee80211_frame_allowed(rx, fc)) {
1662 if (skb == frame) /* last frame */ 1608 dev_kfree_skb(rx->skb);
1663 return RX_DROP_UNUSABLE;
1664 dev_kfree_skb(frame);
1665 continue; 1609 continue;
1666 } 1610 }
1611 dev->stats.rx_packets++;
1612 dev->stats.rx_bytes += rx->skb->len;
1667 1613
1668 ieee80211_deliver_skb(rx); 1614 ieee80211_deliver_skb(rx);
1669 } 1615 }
@@ -1721,7 +1667,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1721 1667
1722 /* Frame has reached destination. Don't forward */ 1668 /* Frame has reached destination. Don't forward */
1723 if (!is_multicast_ether_addr(hdr->addr1) && 1669 if (!is_multicast_ether_addr(hdr->addr1) &&
1724 compare_ether_addr(sdata->dev->dev_addr, hdr->addr3) == 0) 1670 compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
1725 return RX_CONTINUE; 1671 return RX_CONTINUE;
1726 1672
1727 mesh_hdr->ttl--; 1673 mesh_hdr->ttl--;
@@ -1738,15 +1684,17 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1738 1684
1739 if (!fwd_skb && net_ratelimit()) 1685 if (!fwd_skb && net_ratelimit())
1740 printk(KERN_DEBUG "%s: failed to clone mesh frame\n", 1686 printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
1741 sdata->dev->name); 1687 sdata->name);
1742 1688
1743 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; 1689 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1744 memcpy(fwd_hdr->addr2, sdata->dev->dev_addr, ETH_ALEN); 1690 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
1745 info = IEEE80211_SKB_CB(fwd_skb); 1691 info = IEEE80211_SKB_CB(fwd_skb);
1746 memset(info, 0, sizeof(*info)); 1692 memset(info, 0, sizeof(*info));
1747 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 1693 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1748 info->control.vif = &rx->sdata->vif; 1694 info->control.vif = &rx->sdata->vif;
1749 ieee80211_select_queue(local, fwd_skb); 1695 skb_set_queue_mapping(skb,
1696 ieee80211_select_queue(rx->sdata, fwd_skb));
1697 ieee80211_set_qos_hdr(local, skb);
1750 if (is_multicast_ether_addr(fwd_hdr->addr1)) 1698 if (is_multicast_ether_addr(fwd_hdr->addr1))
1751 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, 1699 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
1752 fwded_mcast); 1700 fwded_mcast);
@@ -1786,6 +1734,7 @@ static ieee80211_rx_result debug_noinline
1786ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 1734ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1787{ 1735{
1788 struct ieee80211_sub_if_data *sdata = rx->sdata; 1736 struct ieee80211_sub_if_data *sdata = rx->sdata;
1737 struct ieee80211_local *local = rx->local;
1789 struct net_device *dev = sdata->dev; 1738 struct net_device *dev = sdata->dev;
1790 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1739 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1791 __le16 fc = hdr->frame_control; 1740 __le16 fc = hdr->frame_control;
@@ -1817,6 +1766,13 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1817 dev->stats.rx_packets++; 1766 dev->stats.rx_packets++;
1818 dev->stats.rx_bytes += rx->skb->len; 1767 dev->stats.rx_bytes += rx->skb->len;
1819 1768
1769 if (ieee80211_is_data(hdr->frame_control) &&
1770 !is_multicast_ether_addr(hdr->addr1) &&
1771 local->hw.conf.dynamic_ps_timeout > 0 && local->ps_sdata) {
1772 mod_timer(&local->dynamic_ps_timer, jiffies +
1773 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
1774 }
1775
1820 ieee80211_deliver_skb(rx); 1776 ieee80211_deliver_skb(rx);
1821 1777
1822 return RX_QUEUED; 1778 return RX_QUEUED;
@@ -1870,7 +1826,7 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1870 struct sk_buff *skb; 1826 struct sk_buff *skb;
1871 struct ieee80211_mgmt *resp; 1827 struct ieee80211_mgmt *resp;
1872 1828
1873 if (compare_ether_addr(mgmt->da, sdata->dev->dev_addr) != 0) { 1829 if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) {
1874 /* Not to own unicast address */ 1830 /* Not to own unicast address */
1875 return; 1831 return;
1876 } 1832 }
@@ -1894,7 +1850,7 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1894 resp = (struct ieee80211_mgmt *) skb_put(skb, 24); 1850 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
1895 memset(resp, 0, 24); 1851 memset(resp, 0, 24);
1896 memcpy(resp->da, mgmt->sa, ETH_ALEN); 1852 memcpy(resp->da, mgmt->sa, ETH_ALEN);
1897 memcpy(resp->sa, sdata->dev->dev_addr, ETH_ALEN); 1853 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
1898 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); 1854 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
1899 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 1855 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1900 IEEE80211_STYPE_ACTION); 1856 IEEE80211_STYPE_ACTION);
@@ -1914,23 +1870,25 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1914 struct ieee80211_local *local = rx->local; 1870 struct ieee80211_local *local = rx->local;
1915 struct ieee80211_sub_if_data *sdata = rx->sdata; 1871 struct ieee80211_sub_if_data *sdata = rx->sdata;
1916 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 1872 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1873 struct sk_buff *nskb;
1874 struct ieee80211_rx_status *status;
1917 int len = rx->skb->len; 1875 int len = rx->skb->len;
1918 1876
1919 if (!ieee80211_is_action(mgmt->frame_control)) 1877 if (!ieee80211_is_action(mgmt->frame_control))
1920 return RX_CONTINUE; 1878 return RX_CONTINUE;
1921 1879
1922 if (!rx->sta) 1880 /* drop too small frames */
1923 return RX_DROP_MONITOR; 1881 if (len < IEEE80211_MIN_ACTION_SIZE)
1882 return RX_DROP_UNUSABLE;
1924 1883
1925 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 1884 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
1926 return RX_DROP_MONITOR; 1885 return RX_DROP_UNUSABLE;
1927 1886
1928 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control)) 1887 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1929 return RX_DROP_MONITOR; 1888 return RX_DROP_UNUSABLE;
1930 1889
1931 /* all categories we currently handle have action_code */ 1890 if (ieee80211_drop_unencrypted_mgmt(rx))
1932 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 1891 return RX_DROP_UNUSABLE;
1933 return RX_DROP_MONITOR;
1934 1892
1935 switch (mgmt->u.action.category) { 1893 switch (mgmt->u.action.category) {
1936 case WLAN_CATEGORY_BACK: 1894 case WLAN_CATEGORY_BACK:
@@ -1943,7 +1901,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1943 if (sdata->vif.type != NL80211_IFTYPE_STATION && 1901 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
1944 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 1902 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
1945 sdata->vif.type != NL80211_IFTYPE_AP) 1903 sdata->vif.type != NL80211_IFTYPE_AP)
1946 return RX_DROP_MONITOR; 1904 break;
1905
1906 /* verify action_code is present */
1907 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
1908 break;
1947 1909
1948 switch (mgmt->u.action.u.addba_req.action_code) { 1910 switch (mgmt->u.action.u.addba_req.action_code) {
1949 case WLAN_ACTION_ADDBA_REQ: 1911 case WLAN_ACTION_ADDBA_REQ:
@@ -1951,45 +1913,49 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1951 sizeof(mgmt->u.action.u.addba_req))) 1913 sizeof(mgmt->u.action.u.addba_req)))
1952 return RX_DROP_MONITOR; 1914 return RX_DROP_MONITOR;
1953 ieee80211_process_addba_request(local, rx->sta, mgmt, len); 1915 ieee80211_process_addba_request(local, rx->sta, mgmt, len);
1954 break; 1916 goto handled;
1955 case WLAN_ACTION_ADDBA_RESP: 1917 case WLAN_ACTION_ADDBA_RESP:
1956 if (len < (IEEE80211_MIN_ACTION_SIZE + 1918 if (len < (IEEE80211_MIN_ACTION_SIZE +
1957 sizeof(mgmt->u.action.u.addba_resp))) 1919 sizeof(mgmt->u.action.u.addba_resp)))
1958 return RX_DROP_MONITOR; 1920 break;
1959 ieee80211_process_addba_resp(local, rx->sta, mgmt, len); 1921 ieee80211_process_addba_resp(local, rx->sta, mgmt, len);
1960 break; 1922 goto handled;
1961 case WLAN_ACTION_DELBA: 1923 case WLAN_ACTION_DELBA:
1962 if (len < (IEEE80211_MIN_ACTION_SIZE + 1924 if (len < (IEEE80211_MIN_ACTION_SIZE +
1963 sizeof(mgmt->u.action.u.delba))) 1925 sizeof(mgmt->u.action.u.delba)))
1964 return RX_DROP_MONITOR; 1926 break;
1965 ieee80211_process_delba(sdata, rx->sta, mgmt, len); 1927 ieee80211_process_delba(sdata, rx->sta, mgmt, len);
1966 break; 1928 goto handled;
1967 } 1929 }
1968 break; 1930 break;
1969 case WLAN_CATEGORY_SPECTRUM_MGMT: 1931 case WLAN_CATEGORY_SPECTRUM_MGMT:
1970 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ) 1932 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
1971 return RX_DROP_MONITOR; 1933 break;
1972 1934
1973 if (sdata->vif.type != NL80211_IFTYPE_STATION) 1935 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1974 return RX_DROP_MONITOR; 1936 break;
1937
1938 /* verify action_code is present */
1939 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
1940 break;
1975 1941
1976 switch (mgmt->u.action.u.measurement.action_code) { 1942 switch (mgmt->u.action.u.measurement.action_code) {
1977 case WLAN_ACTION_SPCT_MSR_REQ: 1943 case WLAN_ACTION_SPCT_MSR_REQ:
1978 if (len < (IEEE80211_MIN_ACTION_SIZE + 1944 if (len < (IEEE80211_MIN_ACTION_SIZE +
1979 sizeof(mgmt->u.action.u.measurement))) 1945 sizeof(mgmt->u.action.u.measurement)))
1980 return RX_DROP_MONITOR; 1946 break;
1981 ieee80211_process_measurement_req(sdata, mgmt, len); 1947 ieee80211_process_measurement_req(sdata, mgmt, len);
1982 break; 1948 goto handled;
1983 case WLAN_ACTION_SPCT_CHL_SWITCH: 1949 case WLAN_ACTION_SPCT_CHL_SWITCH:
1984 if (len < (IEEE80211_MIN_ACTION_SIZE + 1950 if (len < (IEEE80211_MIN_ACTION_SIZE +
1985 sizeof(mgmt->u.action.u.chan_switch))) 1951 sizeof(mgmt->u.action.u.chan_switch)))
1986 return RX_DROP_MONITOR; 1952 break;
1987 1953
1988 if (sdata->vif.type != NL80211_IFTYPE_STATION) 1954 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1989 return RX_DROP_MONITOR; 1955 break;
1990 1956
1991 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN)) 1957 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
1992 return RX_DROP_MONITOR; 1958 break;
1993 1959
1994 return ieee80211_sta_rx_mgmt(sdata, rx->skb); 1960 return ieee80211_sta_rx_mgmt(sdata, rx->skb);
1995 } 1961 }
@@ -1997,26 +1963,64 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1997 case WLAN_CATEGORY_SA_QUERY: 1963 case WLAN_CATEGORY_SA_QUERY:
1998 if (len < (IEEE80211_MIN_ACTION_SIZE + 1964 if (len < (IEEE80211_MIN_ACTION_SIZE +
1999 sizeof(mgmt->u.action.u.sa_query))) 1965 sizeof(mgmt->u.action.u.sa_query)))
2000 return RX_DROP_MONITOR; 1966 break;
1967
2001 switch (mgmt->u.action.u.sa_query.action) { 1968 switch (mgmt->u.action.u.sa_query.action) {
2002 case WLAN_ACTION_SA_QUERY_REQUEST: 1969 case WLAN_ACTION_SA_QUERY_REQUEST:
2003 if (sdata->vif.type != NL80211_IFTYPE_STATION) 1970 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2004 return RX_DROP_MONITOR; 1971 break;
2005 ieee80211_process_sa_query_req(sdata, mgmt, len); 1972 ieee80211_process_sa_query_req(sdata, mgmt, len);
2006 break; 1973 goto handled;
2007 case WLAN_ACTION_SA_QUERY_RESPONSE:
2008 /*
2009 * SA Query response is currently only used in AP mode
2010 * and it is processed in user space.
2011 */
2012 return RX_CONTINUE;
2013 } 1974 }
2014 break; 1975 break;
2015 default:
2016 return RX_CONTINUE;
2017 } 1976 }
2018 1977
2019 rx->sta->rx_packets++; 1978 /*
1979 * For AP mode, hostapd is responsible for handling any action
1980 * frames that we didn't handle, including returning unknown
1981 * ones. For all other modes we will return them to the sender,
1982 * setting the 0x80 bit in the action category, as required by
1983 * 802.11-2007 7.3.1.11.
1984 */
1985 if (sdata->vif.type == NL80211_IFTYPE_AP ||
1986 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1987 return RX_DROP_MONITOR;
1988
1989 /*
1990 * Getting here means the kernel doesn't know how to handle
1991 * it, but maybe userspace does ... include returned frames
1992 * so userspace can register for those to know whether ones
1993 * it transmitted were processed or returned.
1994 */
1995 status = IEEE80211_SKB_RXCB(rx->skb);
1996
1997 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
1998 cfg80211_rx_action(rx->sdata->dev, status->freq,
1999 rx->skb->data, rx->skb->len,
2000 GFP_ATOMIC))
2001 goto handled;
2002
2003 /* do not return rejected action frames */
2004 if (mgmt->u.action.category & 0x80)
2005 return RX_DROP_UNUSABLE;
2006
2007 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
2008 GFP_ATOMIC);
2009 if (nskb) {
2010 struct ieee80211_mgmt *mgmt = (void *)nskb->data;
2011
2012 mgmt->u.action.category |= 0x80;
2013 memcpy(mgmt->da, mgmt->sa, ETH_ALEN);
2014 memcpy(mgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
2015
2016 memset(nskb->cb, 0, sizeof(nskb->cb));
2017
2018 ieee80211_tx_skb(rx->sdata, nskb);
2019 }
2020
2021 handled:
2022 if (rx->sta)
2023 rx->sta->rx_packets++;
2020 dev_kfree_skb(rx->skb); 2024 dev_kfree_skb(rx->skb);
2021 return RX_QUEUED; 2025 return RX_QUEUED;
2022} 2026}
@@ -2025,13 +2029,17 @@ static ieee80211_rx_result debug_noinline
2025ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 2029ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2026{ 2030{
2027 struct ieee80211_sub_if_data *sdata = rx->sdata; 2031 struct ieee80211_sub_if_data *sdata = rx->sdata;
2028 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2032 ieee80211_rx_result rxs;
2029 2033
2030 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 2034 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
2031 return RX_DROP_MONITOR; 2035 return RX_DROP_MONITOR;
2032 2036
2033 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control)) 2037 if (ieee80211_drop_unencrypted_mgmt(rx))
2034 return RX_DROP_MONITOR; 2038 return RX_DROP_UNUSABLE;
2039
2040 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
2041 if (rxs != RX_CONTINUE)
2042 return rxs;
2035 2043
2036 if (ieee80211_vif_is_mesh(&sdata->vif)) 2044 if (ieee80211_vif_is_mesh(&sdata->vif))
2037 return ieee80211_mesh_rx_mgmt(sdata, rx->skb); 2045 return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
@@ -2137,7 +2145,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2137 skb->protocol = htons(ETH_P_802_2); 2145 skb->protocol = htons(ETH_P_802_2);
2138 2146
2139 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2147 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2140 if (!netif_running(sdata->dev)) 2148 if (!ieee80211_sdata_running(sdata))
2141 continue; 2149 continue;
2142 2150
2143 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || 2151 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
@@ -2274,7 +2282,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2274 if (!bssid && !sdata->u.mgd.use_4addr) 2282 if (!bssid && !sdata->u.mgd.use_4addr)
2275 return 0; 2283 return 0;
2276 if (!multicast && 2284 if (!multicast &&
2277 compare_ether_addr(sdata->dev->dev_addr, hdr->addr1) != 0) { 2285 compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
2278 if (!(sdata->dev->flags & IFF_PROMISC)) 2286 if (!(sdata->dev->flags & IFF_PROMISC))
2279 return 0; 2287 return 0;
2280 rx->flags &= ~IEEE80211_RX_RA_MATCH; 2288 rx->flags &= ~IEEE80211_RX_RA_MATCH;
@@ -2291,7 +2299,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2291 return 0; 2299 return 0;
2292 rx->flags &= ~IEEE80211_RX_RA_MATCH; 2300 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2293 } else if (!multicast && 2301 } else if (!multicast &&
2294 compare_ether_addr(sdata->dev->dev_addr, 2302 compare_ether_addr(sdata->vif.addr,
2295 hdr->addr1) != 0) { 2303 hdr->addr1) != 0) {
2296 if (!(sdata->dev->flags & IFF_PROMISC)) 2304 if (!(sdata->dev->flags & IFF_PROMISC))
2297 return 0; 2305 return 0;
@@ -2302,13 +2310,13 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2302 rate_idx = 0; /* TODO: HT rates */ 2310 rate_idx = 0; /* TODO: HT rates */
2303 else 2311 else
2304 rate_idx = status->rate_idx; 2312 rate_idx = status->rate_idx;
2305 rx->sta = ieee80211_ibss_add_sta(sdata, bssid, hdr->addr2, 2313 rx->sta = ieee80211_ibss_add_sta(sdata, bssid,
2306 BIT(rate_idx)); 2314 hdr->addr2, BIT(rate_idx), GFP_ATOMIC);
2307 } 2315 }
2308 break; 2316 break;
2309 case NL80211_IFTYPE_MESH_POINT: 2317 case NL80211_IFTYPE_MESH_POINT:
2310 if (!multicast && 2318 if (!multicast &&
2311 compare_ether_addr(sdata->dev->dev_addr, 2319 compare_ether_addr(sdata->vif.addr,
2312 hdr->addr1) != 0) { 2320 hdr->addr1) != 0) {
2313 if (!(sdata->dev->flags & IFF_PROMISC)) 2321 if (!(sdata->dev->flags & IFF_PROMISC))
2314 return 0; 2322 return 0;
@@ -2319,11 +2327,11 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2319 case NL80211_IFTYPE_AP_VLAN: 2327 case NL80211_IFTYPE_AP_VLAN:
2320 case NL80211_IFTYPE_AP: 2328 case NL80211_IFTYPE_AP:
2321 if (!bssid) { 2329 if (!bssid) {
2322 if (compare_ether_addr(sdata->dev->dev_addr, 2330 if (compare_ether_addr(sdata->vif.addr,
2323 hdr->addr1)) 2331 hdr->addr1))
2324 return 0; 2332 return 0;
2325 } else if (!ieee80211_bssid_match(bssid, 2333 } else if (!ieee80211_bssid_match(bssid,
2326 sdata->dev->dev_addr)) { 2334 sdata->vif.addr)) {
2327 if (!(rx->flags & IEEE80211_RX_IN_SCAN)) 2335 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2328 return 0; 2336 return 0;
2329 rx->flags &= ~IEEE80211_RX_RA_MATCH; 2337 rx->flags &= ~IEEE80211_RX_RA_MATCH;
@@ -2362,6 +2370,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2362 int prepares; 2370 int prepares;
2363 struct ieee80211_sub_if_data *prev = NULL; 2371 struct ieee80211_sub_if_data *prev = NULL;
2364 struct sk_buff *skb_new; 2372 struct sk_buff *skb_new;
2373 struct sta_info *sta, *tmp;
2374 bool found_sta = false;
2365 2375
2366 hdr = (struct ieee80211_hdr *)skb->data; 2376 hdr = (struct ieee80211_hdr *)skb->data;
2367 memset(&rx, 0, sizeof(rx)); 2377 memset(&rx, 0, sizeof(rx));
@@ -2378,68 +2388,87 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2378 ieee80211_parse_qos(&rx); 2388 ieee80211_parse_qos(&rx);
2379 ieee80211_verify_alignment(&rx); 2389 ieee80211_verify_alignment(&rx);
2380 2390
2381 rx.sta = sta_info_get(local, hdr->addr2); 2391 if (ieee80211_is_data(hdr->frame_control)) {
2382 if (rx.sta) 2392 for_each_sta_info(local, hdr->addr2, sta, tmp) {
2383 rx.sdata = rx.sta->sdata; 2393 rx.sta = sta;
2384 2394 found_sta = true;
2385 if (rx.sdata && ieee80211_is_data(hdr->frame_control)) { 2395 rx.sdata = sta->sdata;
2386 rx.flags |= IEEE80211_RX_RA_MATCH; 2396
2387 prepares = prepare_for_handlers(rx.sdata, &rx, hdr); 2397 rx.flags |= IEEE80211_RX_RA_MATCH;
2388 if (prepares) { 2398 prepares = prepare_for_handlers(rx.sdata, &rx, hdr);
2389 if (status->flag & RX_FLAG_MMIC_ERROR) { 2399 if (prepares) {
2390 if (rx.flags & IEEE80211_RX_RA_MATCH) 2400 if (status->flag & RX_FLAG_MMIC_ERROR) {
2391 ieee80211_rx_michael_mic_report(hdr, &rx); 2401 if (rx.flags & IEEE80211_RX_RA_MATCH)
2392 } else 2402 ieee80211_rx_michael_mic_report(hdr, &rx);
2393 prev = rx.sdata; 2403 } else
2404 prev = rx.sdata;
2405 }
2394 } 2406 }
2395 } else list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2407 }
2396 if (!netif_running(sdata->dev)) 2408 if (!found_sta) {
2397 continue; 2409 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2410 if (!ieee80211_sdata_running(sdata))
2411 continue;
2398 2412
2399 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 2413 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
2400 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 2414 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2401 continue; 2415 continue;
2402 2416
2403 rx.flags |= IEEE80211_RX_RA_MATCH; 2417 /*
2404 prepares = prepare_for_handlers(sdata, &rx, hdr); 2418 * frame is destined for this interface, but if it's
2419 * not also for the previous one we handle that after
2420 * the loop to avoid copying the SKB once too much
2421 */
2405 2422
2406 if (!prepares) 2423 if (!prev) {
2407 continue; 2424 prev = sdata;
2425 continue;
2426 }
2408 2427
2409 if (status->flag & RX_FLAG_MMIC_ERROR) { 2428 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2410 rx.sdata = sdata;
2411 if (rx.flags & IEEE80211_RX_RA_MATCH)
2412 ieee80211_rx_michael_mic_report(hdr, &rx);
2413 continue;
2414 }
2415 2429
2416 /* 2430 rx.flags |= IEEE80211_RX_RA_MATCH;
2417 * frame is destined for this interface, but if it's not 2431 prepares = prepare_for_handlers(prev, &rx, hdr);
2418 * also for the previous one we handle that after the 2432
2419 * loop to avoid copying the SKB once too much 2433 if (!prepares)
2420 */ 2434 goto next;
2421 2435
2422 if (!prev) { 2436 if (status->flag & RX_FLAG_MMIC_ERROR) {
2437 rx.sdata = prev;
2438 if (rx.flags & IEEE80211_RX_RA_MATCH)
2439 ieee80211_rx_michael_mic_report(hdr,
2440 &rx);
2441 goto next;
2442 }
2443
2444 /*
2445 * frame was destined for the previous interface
2446 * so invoke RX handlers for it
2447 */
2448
2449 skb_new = skb_copy(skb, GFP_ATOMIC);
2450 if (!skb_new) {
2451 if (net_ratelimit())
2452 printk(KERN_DEBUG "%s: failed to copy "
2453 "multicast frame for %s\n",
2454 wiphy_name(local->hw.wiphy),
2455 prev->name);
2456 goto next;
2457 }
2458 ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate);
2459next:
2423 prev = sdata; 2460 prev = sdata;
2424 continue;
2425 } 2461 }
2426 2462
2427 /* 2463 if (prev) {
2428 * frame was destined for the previous interface 2464 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2429 * so invoke RX handlers for it
2430 */
2431 2465
2432 skb_new = skb_copy(skb, GFP_ATOMIC); 2466 rx.flags |= IEEE80211_RX_RA_MATCH;
2433 if (!skb_new) { 2467 prepares = prepare_for_handlers(prev, &rx, hdr);
2434 if (net_ratelimit()) 2468
2435 printk(KERN_DEBUG "%s: failed to copy " 2469 if (!prepares)
2436 "multicast frame for %s\n", 2470 prev = NULL;
2437 wiphy_name(local->hw.wiphy),
2438 prev->dev->name);
2439 continue;
2440 } 2471 }
2441 ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate);
2442 prev = sdata;
2443 } 2472 }
2444 if (prev) 2473 if (prev)
2445 ieee80211_invoke_rx_handlers(prev, &rx, skb, rate); 2474 ieee80211_invoke_rx_handlers(prev, &rx, skb, rate);
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index f1a4c7160300..b822dce97867 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -12,7 +12,6 @@
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13 */ 13 */
14 14
15#include <linux/wireless.h>
16#include <linux/if_arp.h> 15#include <linux/if_arp.h>
17#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
18#include <net/mac80211.h> 17#include <net/mac80211.h>
@@ -29,16 +28,19 @@ struct ieee80211_bss *
29ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq, 28ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
30 u8 *ssid, u8 ssid_len) 29 u8 *ssid, u8 ssid_len)
31{ 30{
32 return (void *)cfg80211_get_bss(local->hw.wiphy, 31 struct cfg80211_bss *cbss;
33 ieee80211_get_channel(local->hw.wiphy, 32
34 freq), 33 cbss = cfg80211_get_bss(local->hw.wiphy,
35 bssid, ssid, ssid_len, 34 ieee80211_get_channel(local->hw.wiphy, freq),
36 0, 0); 35 bssid, ssid, ssid_len, 0, 0);
36 if (!cbss)
37 return NULL;
38 return (void *)cbss->priv;
37} 39}
38 40
39static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss) 41static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss)
40{ 42{
41 struct ieee80211_bss *bss = (void *)cbss; 43 struct ieee80211_bss *bss = (void *)cbss->priv;
42 44
43 kfree(bss_mesh_id(bss)); 45 kfree(bss_mesh_id(bss));
44 kfree(bss_mesh_cfg(bss)); 46 kfree(bss_mesh_cfg(bss));
@@ -47,7 +49,26 @@ static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss)
47void ieee80211_rx_bss_put(struct ieee80211_local *local, 49void ieee80211_rx_bss_put(struct ieee80211_local *local,
48 struct ieee80211_bss *bss) 50 struct ieee80211_bss *bss)
49{ 51{
50 cfg80211_put_bss((struct cfg80211_bss *)bss); 52 if (!bss)
53 return;
54 cfg80211_put_bss(container_of((void *)bss, struct cfg80211_bss, priv));
55}
56
57static bool is_uapsd_supported(struct ieee802_11_elems *elems)
58{
59 u8 qos_info;
60
61 if (elems->wmm_info && elems->wmm_info_len == 7
62 && elems->wmm_info[5] == 1)
63 qos_info = elems->wmm_info[6];
64 else if (elems->wmm_param && elems->wmm_param_len == 24
65 && elems->wmm_param[5] == 1)
66 qos_info = elems->wmm_param[6];
67 else
68 /* no valid wmm information or parameter element found */
69 return false;
70
71 return qos_info & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD;
51} 72}
52 73
53struct ieee80211_bss * 74struct ieee80211_bss *
@@ -59,6 +80,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
59 struct ieee80211_channel *channel, 80 struct ieee80211_channel *channel,
60 bool beacon) 81 bool beacon)
61{ 82{
83 struct cfg80211_bss *cbss;
62 struct ieee80211_bss *bss; 84 struct ieee80211_bss *bss;
63 int clen; 85 int clen;
64 s32 signal = 0; 86 s32 signal = 0;
@@ -68,13 +90,14 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
68 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) 90 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
69 signal = (rx_status->signal * 100) / local->hw.max_signal; 91 signal = (rx_status->signal * 100) / local->hw.max_signal;
70 92
71 bss = (void *)cfg80211_inform_bss_frame(local->hw.wiphy, channel, 93 cbss = cfg80211_inform_bss_frame(local->hw.wiphy, channel,
72 mgmt, len, signal, GFP_ATOMIC); 94 mgmt, len, signal, GFP_ATOMIC);
73 95
74 if (!bss) 96 if (!cbss)
75 return NULL; 97 return NULL;
76 98
77 bss->cbss.free_priv = ieee80211_rx_bss_free; 99 cbss->free_priv = ieee80211_rx_bss_free;
100 bss = (void *)cbss->priv;
78 101
79 /* save the ERP value so that it is available at association time */ 102 /* save the ERP value so that it is available at association time */
80 if (elems->erp_info && elems->erp_info_len >= 1) { 103 if (elems->erp_info && elems->erp_info_len >= 1) {
@@ -88,10 +111,6 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
88 bss->dtim_period = tim_ie->dtim_period; 111 bss->dtim_period = tim_ie->dtim_period;
89 } 112 }
90 113
91 /* set default value for buggy AP/no TIM element */
92 if (bss->dtim_period == 0)
93 bss->dtim_period = 1;
94
95 bss->supp_rates_len = 0; 114 bss->supp_rates_len = 0;
96 if (elems->supp_rates) { 115 if (elems->supp_rates) {
97 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; 116 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
@@ -111,6 +130,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
111 } 130 }
112 131
113 bss->wmm_used = elems->wmm_param || elems->wmm_info; 132 bss->wmm_used = elems->wmm_param || elems->wmm_info;
133 bss->uapsd_supported = is_uapsd_supported(elems);
114 134
115 if (!beacon) 135 if (!beacon)
116 bss->last_probe_resp = jiffies; 136 bss->last_probe_resp = jiffies;
@@ -147,7 +167,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
147 presp = ieee80211_is_probe_resp(fc); 167 presp = ieee80211_is_probe_resp(fc);
148 if (presp) { 168 if (presp) {
149 /* ignore ProbeResp to foreign address */ 169 /* ignore ProbeResp to foreign address */
150 if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) 170 if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
151 return RX_DROP_MONITOR; 171 return RX_DROP_MONITOR;
152 172
153 presp = true; 173 presp = true;
@@ -220,82 +240,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
220 return true; 240 return true;
221} 241}
222 242
223/*
224 * inform AP that we will go to sleep so that it will buffer the frames
225 * while we scan
226 */
227static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata)
228{
229 struct ieee80211_local *local = sdata->local;
230
231 local->scan_ps_enabled = false;
232
233 /* FIXME: what to do when local->pspolling is true? */
234
235 del_timer_sync(&local->dynamic_ps_timer);
236 cancel_work_sync(&local->dynamic_ps_enable_work);
237
238 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
239 local->scan_ps_enabled = true;
240 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
241 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
242 }
243
244 if (!(local->scan_ps_enabled) ||
245 !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
246 /*
247 * If power save was enabled, no need to send a nullfunc
248 * frame because AP knows that we are sleeping. But if the
249 * hardware is creating the nullfunc frame for power save
250 * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not
251 * enabled) and power save was enabled, the firmware just
252 * sent a null frame with power save disabled. So we need
253 * to send a new nullfunc frame to inform the AP that we
254 * are again sleeping.
255 */
256 ieee80211_send_nullfunc(local, sdata, 1);
257}
258
259/* inform AP that we are awake again, unless power save is enabled */
260static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
261{
262 struct ieee80211_local *local = sdata->local;
263
264 if (!local->ps_sdata)
265 ieee80211_send_nullfunc(local, sdata, 0);
266 else if (local->scan_ps_enabled) {
267 /*
268 * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
269 * will send a nullfunc frame with the powersave bit set
270 * even though the AP already knows that we are sleeping.
271 * This could be avoided by sending a null frame with power
272 * save bit disabled before enabling the power save, but
273 * this doesn't gain anything.
274 *
275 * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need
276 * to send a nullfunc frame because AP already knows that
277 * we are sleeping, let's just enable power save mode in
278 * hardware.
279 */
280 local->hw.conf.flags |= IEEE80211_CONF_PS;
281 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
282 } else if (local->hw.conf.dynamic_ps_timeout > 0) {
283 /*
284 * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
285 * had been running before leaving the operating channel,
286 * restart the timer now and send a nullfunc frame to inform
287 * the AP that we are awake.
288 */
289 ieee80211_send_nullfunc(local, sdata, 0);
290 mod_timer(&local->dynamic_ps_timer, jiffies +
291 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
292 }
293}
294
295void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) 243void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
296{ 244{
297 struct ieee80211_local *local = hw_to_local(hw); 245 struct ieee80211_local *local = hw_to_local(hw);
298 struct ieee80211_sub_if_data *sdata;
299 bool was_hw_scan; 246 bool was_hw_scan;
300 247
301 mutex_lock(&local->scan_mtx); 248 mutex_lock(&local->scan_mtx);
@@ -344,41 +291,19 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
344 291
345 drv_sw_scan_complete(local); 292 drv_sw_scan_complete(local);
346 293
347 mutex_lock(&local->iflist_mtx); 294 ieee80211_offchannel_return(local, true);
348 list_for_each_entry(sdata, &local->interfaces, list) {
349 if (!netif_running(sdata->dev))
350 continue;
351
352 /* Tell AP we're back */
353 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
354 if (sdata->u.mgd.associated) {
355 ieee80211_scan_ps_disable(sdata);
356 netif_wake_queue(sdata->dev);
357 }
358 } else
359 netif_wake_queue(sdata->dev);
360
361 /* re-enable beaconing */
362 if (sdata->vif.type == NL80211_IFTYPE_AP ||
363 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
364 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
365 ieee80211_bss_info_change_notify(
366 sdata, BSS_CHANGED_BEACON_ENABLED);
367 }
368 mutex_unlock(&local->iflist_mtx);
369 295
370 done: 296 done:
371 ieee80211_recalc_idle(local); 297 ieee80211_recalc_idle(local);
372 ieee80211_mlme_notify_scan_completed(local); 298 ieee80211_mlme_notify_scan_completed(local);
373 ieee80211_ibss_notify_scan_completed(local); 299 ieee80211_ibss_notify_scan_completed(local);
374 ieee80211_mesh_notify_scan_completed(local); 300 ieee80211_mesh_notify_scan_completed(local);
301 ieee80211_queue_work(&local->hw, &local->work_work);
375} 302}
376EXPORT_SYMBOL(ieee80211_scan_completed); 303EXPORT_SYMBOL(ieee80211_scan_completed);
377 304
378static int ieee80211_start_sw_scan(struct ieee80211_local *local) 305static int ieee80211_start_sw_scan(struct ieee80211_local *local)
379{ 306{
380 struct ieee80211_sub_if_data *sdata;
381
382 /* 307 /*
383 * Hardware/driver doesn't support hw_scan, so use software 308 * Hardware/driver doesn't support hw_scan, so use software
384 * scanning instead. First send a nullfunc frame with power save 309 * scanning instead. First send a nullfunc frame with power save
@@ -394,33 +319,15 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
394 */ 319 */
395 drv_sw_scan_start(local); 320 drv_sw_scan_start(local);
396 321
397 mutex_lock(&local->iflist_mtx); 322 ieee80211_offchannel_stop_beaconing(local);
398 list_for_each_entry(sdata, &local->interfaces, list) {
399 if (!netif_running(sdata->dev))
400 continue;
401
402 /* disable beaconing */
403 if (sdata->vif.type == NL80211_IFTYPE_AP ||
404 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
405 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
406 ieee80211_bss_info_change_notify(
407 sdata, BSS_CHANGED_BEACON_ENABLED);
408
409 /*
410 * only handle non-STA interfaces here, STA interfaces
411 * are handled in the scan state machine
412 */
413 if (sdata->vif.type != NL80211_IFTYPE_STATION)
414 netif_stop_queue(sdata->dev);
415 }
416 mutex_unlock(&local->iflist_mtx);
417 323
418 local->next_scan_state = SCAN_DECISION; 324 local->next_scan_state = SCAN_DECISION;
419 local->scan_channel_idx = 0; 325 local->scan_channel_idx = 0;
420 326
327 drv_flush(local, false);
328
421 ieee80211_configure_filter(local); 329 ieee80211_configure_filter(local);
422 330
423 /* TODO: start scan as soon as all nullfunc frames are ACKed */
424 ieee80211_queue_delayed_work(&local->hw, 331 ieee80211_queue_delayed_work(&local->hw,
425 &local->scan_work, 332 &local->scan_work,
426 IEEE80211_CHANNEL_TIME); 333 IEEE80211_CHANNEL_TIME);
@@ -433,12 +340,18 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
433 struct cfg80211_scan_request *req) 340 struct cfg80211_scan_request *req)
434{ 341{
435 struct ieee80211_local *local = sdata->local; 342 struct ieee80211_local *local = sdata->local;
436 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
437 int rc; 343 int rc;
438 344
439 if (local->scan_req) 345 if (local->scan_req)
440 return -EBUSY; 346 return -EBUSY;
441 347
348 if (!list_empty(&local->work_list)) {
349 /* wait for the work to finish/time out */
350 local->scan_req = req;
351 local->scan_sdata = sdata;
352 return 0;
353 }
354
442 if (local->ops->hw_scan) { 355 if (local->ops->hw_scan) {
443 u8 *ies; 356 u8 *ies;
444 357
@@ -458,32 +371,33 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
458 local->hw_scan_req->ie = ies; 371 local->hw_scan_req->ie = ies;
459 372
460 local->hw_scan_band = 0; 373 local->hw_scan_band = 0;
374
375 /*
376 * After allocating local->hw_scan_req, we must
377 * go through until ieee80211_prep_hw_scan(), so
378 * anything that might be changed here and leave
379 * this function early must not go after this
380 * allocation.
381 */
461 } 382 }
462 383
463 local->scan_req = req; 384 local->scan_req = req;
464 local->scan_sdata = sdata; 385 local->scan_sdata = sdata;
465 386
466 if (req != local->int_scan_req &&
467 sdata->vif.type == NL80211_IFTYPE_STATION &&
468 !list_empty(&ifmgd->work_list)) {
469 /* actually wait for the work it's doing to finish/time out */
470 set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
471 return 0;
472 }
473
474 if (local->ops->hw_scan) 387 if (local->ops->hw_scan)
475 __set_bit(SCAN_HW_SCANNING, &local->scanning); 388 __set_bit(SCAN_HW_SCANNING, &local->scanning);
476 else 389 else
477 __set_bit(SCAN_SW_SCANNING, &local->scanning); 390 __set_bit(SCAN_SW_SCANNING, &local->scanning);
391
478 /* 392 /*
479 * Kicking off the scan need not be protected, 393 * Kicking off the scan need not be protected,
480 * only the scan variable stuff, since now 394 * only the scan variable stuff, since now
481 * local->scan_req is assigned and other callers 395 * local->scan_req is assigned and other callers
482 * will abort their scan attempts. 396 * will abort their scan attempts.
483 * 397 *
484 * This avoids getting a scan_mtx -> iflist_mtx 398 * This avoids too many locking dependencies
485 * dependency, so that the scan completed calls 399 * so that the scan completed calls have more
486 * have more locking freedom. 400 * locking freedom.
487 */ 401 */
488 402
489 ieee80211_recalc_idle(local); 403 ieee80211_recalc_idle(local);
@@ -526,7 +440,7 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
526 /* check if at least one STA interface is associated */ 440 /* check if at least one STA interface is associated */
527 mutex_lock(&local->iflist_mtx); 441 mutex_lock(&local->iflist_mtx);
528 list_for_each_entry(sdata, &local->interfaces, list) { 442 list_for_each_entry(sdata, &local->interfaces, list) {
529 if (!netif_running(sdata->dev)) 443 if (!ieee80211_sdata_running(sdata))
530 continue; 444 continue;
531 445
532 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 446 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
@@ -564,56 +478,35 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
564static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local, 478static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
565 unsigned long *next_delay) 479 unsigned long *next_delay)
566{ 480{
567 struct ieee80211_sub_if_data *sdata; 481 ieee80211_offchannel_stop_station(local);
482
483 __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
568 484
569 /* 485 /*
570 * notify the AP about us leaving the channel and stop all STA interfaces 486 * What if the nullfunc frames didn't arrive?
571 */ 487 */
572 mutex_lock(&local->iflist_mtx); 488 drv_flush(local, false);
573 list_for_each_entry(sdata, &local->interfaces, list) { 489 if (local->ops->flush)
574 if (!netif_running(sdata->dev)) 490 *next_delay = 0;
575 continue; 491 else
576 492 *next_delay = HZ / 10;
577 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
578 netif_stop_queue(sdata->dev);
579 if (sdata->u.mgd.associated)
580 ieee80211_scan_ps_enable(sdata);
581 }
582 }
583 mutex_unlock(&local->iflist_mtx);
584
585 __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
586 493
587 /* advance to the next channel to be scanned */ 494 /* advance to the next channel to be scanned */
588 *next_delay = HZ / 10;
589 local->next_scan_state = SCAN_SET_CHANNEL; 495 local->next_scan_state = SCAN_SET_CHANNEL;
590} 496}
591 497
592static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *local, 498static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *local,
593 unsigned long *next_delay) 499 unsigned long *next_delay)
594{ 500{
595 struct ieee80211_sub_if_data *sdata = local->scan_sdata;
596
597 /* switch back to the operating channel */ 501 /* switch back to the operating channel */
598 local->scan_channel = NULL; 502 local->scan_channel = NULL;
599 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 503 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
600 504
601 /* 505 /*
602 * notify the AP about us being back and restart all STA interfaces 506 * Only re-enable station mode interface now; beaconing will be
507 * re-enabled once the full scan has been completed.
603 */ 508 */
604 mutex_lock(&local->iflist_mtx); 509 ieee80211_offchannel_return(local, false);
605 list_for_each_entry(sdata, &local->interfaces, list) {
606 if (!netif_running(sdata->dev))
607 continue;
608
609 /* Tell AP we're back */
610 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
611 if (sdata->u.mgd.associated)
612 ieee80211_scan_ps_disable(sdata);
613 netif_wake_queue(sdata->dev);
614 }
615 }
616 mutex_unlock(&local->iflist_mtx);
617 510
618 __clear_bit(SCAN_OFF_CHANNEL, &local->scanning); 511 __clear_bit(SCAN_OFF_CHANNEL, &local->scanning);
619 512
@@ -727,7 +620,7 @@ void ieee80211_scan_work(struct work_struct *work)
727 /* 620 /*
728 * Avoid re-scheduling when the sdata is going away. 621 * Avoid re-scheduling when the sdata is going away.
729 */ 622 */
730 if (!netif_running(sdata->dev)) { 623 if (!ieee80211_sdata_running(sdata)) {
731 ieee80211_scan_completed(&local->hw, true); 624 ieee80211_scan_completed(&local->hw, true);
732 return; 625 return;
733 } 626 }
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index aa743a895cf9..7733f66ee2c4 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -35,7 +35,7 @@ static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_da
35 35
36 if (!skb) { 36 if (!skb) {
37 printk(KERN_ERR "%s: failed to allocate buffer for " 37 printk(KERN_ERR "%s: failed to allocate buffer for "
38 "measurement report frame\n", sdata->dev->name); 38 "measurement report frame\n", sdata->name);
39 return; 39 return;
40 } 40 }
41 41
@@ -43,7 +43,7 @@ static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_da
43 msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24); 43 msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
44 memset(msr_report, 0, 24); 44 memset(msr_report, 0, 24);
45 memcpy(msr_report->da, da, ETH_ALEN); 45 memcpy(msr_report->da, da, ETH_ALEN);
46 memcpy(msr_report->sa, sdata->dev->dev_addr, ETH_ALEN); 46 memcpy(msr_report->sa, sdata->vif.addr, ETH_ALEN);
47 memcpy(msr_report->bssid, bssid, ETH_ALEN); 47 memcpy(msr_report->bssid, bssid, ETH_ALEN);
48 msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 48 msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
49 IEEE80211_STYPE_ACTION); 49 IEEE80211_STYPE_ACTION);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 71f370dd24bc..56422d894351 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -32,49 +32,33 @@
32 * for faster lookup and a list for iteration. They are managed using 32 * for faster lookup and a list for iteration. They are managed using
33 * RCU, i.e. access to the list and hash table is protected by RCU. 33 * RCU, i.e. access to the list and hash table is protected by RCU.
34 * 34 *
35 * Upon allocating a STA info structure with sta_info_alloc(), the caller owns 35 * Upon allocating a STA info structure with sta_info_alloc(), the caller
36 * that structure. It must then either destroy it using sta_info_destroy() 36 * owns that structure. It must then insert it into the hash table using
37 * (which is pretty useless) or insert it into the hash table using 37 * either sta_info_insert() or sta_info_insert_rcu(); only in the latter
38 * sta_info_insert() which demotes the reference from ownership to a regular 38 * case (which acquires an rcu read section but must not be called from
39 * RCU-protected reference; if the function is called without protection by an 39 * within one) will the pointer still be valid after the call. Note that
40 * RCU critical section the reference is instantly invalidated. Note that the 40 * the caller may not do much with the STA info before inserting it, in
41 * caller may not do much with the STA info before inserting it, in particular, 41 * particular, it may not start any mesh peer link management or add
42 * it may not start any mesh peer link management or add encryption keys. 42 * encryption keys.
43 * 43 *
44 * When the insertion fails (sta_info_insert()) returns non-zero), the 44 * When the insertion fails (sta_info_insert()) returns non-zero), the
45 * structure will have been freed by sta_info_insert()! 45 * structure will have been freed by sta_info_insert()!
46 * 46 *
47 * sta entries are added by mac80211 when you establish a link with a 47 * Station entries are added by mac80211 when you establish a link with a
48 * peer. This means different things for the different type of interfaces 48 * peer. This means different things for the different type of interfaces
49 * we support. For a regular station this mean we add the AP sta when we 49 * we support. For a regular station this mean we add the AP sta when we
50 * receive an assocation response from the AP. For IBSS this occurs when 50 * receive an assocation response from the AP. For IBSS this occurs when
51 * we receive a probe response or a beacon from target IBSS network. For 51 * get to know about a peer on the same IBSS. For WDS we add the sta for
52 * WDS we add the sta for the peer imediately upon device open. When using 52 * the peer imediately upon device open. When using AP mode we add stations
53 * AP mode we add stations for each respective station upon request from 53 * for each respective station upon request from userspace through nl80211.
54 * userspace through nl80211.
55 * 54 *
56 * Because there are debugfs entries for each station, and adding those 55 * In order to remove a STA info structure, various sta_info_destroy_*()
57 * must be able to sleep, it is also possible to "pin" a station entry, 56 * calls are available.
58 * that means it can be removed from the hash table but not be freed.
59 * See the comment in __sta_info_unlink() for more information, this is
60 * an internal capability only.
61 * 57 *
62 * In order to remove a STA info structure, the caller needs to first 58 * There is no concept of ownership on a STA entry, each structure is
63 * unlink it (sta_info_unlink()) from the list and hash tables and 59 * owned by the global hash table/list until it is removed. All users of
64 * then destroy it; sta_info_destroy() will wait for an RCU grace period 60 * the structure need to be RCU protected so that the structure won't be
65 * to elapse before actually freeing it. Due to the pinning and the 61 * freed before they are done using it.
66 * possibility of multiple callers trying to remove the same STA info at
67 * the same time, sta_info_unlink() can clear the STA info pointer it is
68 * passed to indicate that the STA info is owned by somebody else now.
69 *
70 * If sta_info_unlink() did not clear the pointer then the caller owns
71 * the STA info structure now and is responsible of destroying it with
72 * a call to sta_info_destroy().
73 *
74 * In all other cases, there is no concept of ownership on a STA entry,
75 * each structure is owned by the global hash table/list until it is
76 * removed. All users of the structure need to be RCU protected so that
77 * the structure won't be freed before they are done using it.
78 */ 62 */
79 63
80/* Caller must hold local->sta_lock */ 64/* Caller must hold local->sta_lock */
@@ -103,13 +87,37 @@ static int sta_info_hash_del(struct ieee80211_local *local,
103} 87}
104 88
105/* protected by RCU */ 89/* protected by RCU */
106struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr) 90struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
91 const u8 *addr)
107{ 92{
93 struct ieee80211_local *local = sdata->local;
108 struct sta_info *sta; 94 struct sta_info *sta;
109 95
110 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); 96 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
111 while (sta) { 97 while (sta) {
112 if (memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) 98 if (sta->sdata == sdata &&
99 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
100 break;
101 sta = rcu_dereference(sta->hnext);
102 }
103 return sta;
104}
105
106/*
107 * Get sta info either from the specified interface
108 * or from one of its vlans
109 */
110struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
111 const u8 *addr)
112{
113 struct ieee80211_local *local = sdata->local;
114 struct sta_info *sta;
115
116 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
117 while (sta) {
118 if ((sta->sdata == sdata ||
119 sta->sdata->bss == sdata->bss) &&
120 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
113 break; 121 break;
114 sta = rcu_dereference(sta->hnext); 122 sta = rcu_dereference(sta->hnext);
115 } 123 }
@@ -161,101 +169,6 @@ static void __sta_info_free(struct ieee80211_local *local,
161 kfree(sta); 169 kfree(sta);
162} 170}
163 171
164void sta_info_destroy(struct sta_info *sta)
165{
166 struct ieee80211_local *local;
167 struct sk_buff *skb;
168 int i;
169
170 might_sleep();
171
172 if (!sta)
173 return;
174
175 local = sta->local;
176
177 cancel_work_sync(&sta->drv_unblock_wk);
178
179 rate_control_remove_sta_debugfs(sta);
180 ieee80211_sta_debugfs_remove(sta);
181
182#ifdef CONFIG_MAC80211_MESH
183 if (ieee80211_vif_is_mesh(&sta->sdata->vif))
184 mesh_plink_deactivate(sta);
185#endif
186
187 /*
188 * We have only unlinked the key, and actually destroying it
189 * may mean it is removed from hardware which requires that
190 * the key->sta pointer is still valid, so flush the key todo
191 * list here.
192 *
193 * ieee80211_key_todo() will synchronize_rcu() so after this
194 * nothing can reference this sta struct any more.
195 */
196 ieee80211_key_todo();
197
198#ifdef CONFIG_MAC80211_MESH
199 if (ieee80211_vif_is_mesh(&sta->sdata->vif))
200 del_timer_sync(&sta->plink_timer);
201#endif
202
203 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
204 local->total_ps_buffered--;
205 dev_kfree_skb_any(skb);
206 }
207
208 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL)
209 dev_kfree_skb_any(skb);
210
211 for (i = 0; i < STA_TID_NUM; i++) {
212 struct tid_ampdu_rx *tid_rx;
213 struct tid_ampdu_tx *tid_tx;
214
215 spin_lock_bh(&sta->lock);
216 tid_rx = sta->ampdu_mlme.tid_rx[i];
217 /* Make sure timer won't free the tid_rx struct, see below */
218 if (tid_rx)
219 tid_rx->shutdown = true;
220
221 spin_unlock_bh(&sta->lock);
222
223 /*
224 * Outside spinlock - shutdown is true now so that the timer
225 * won't free tid_rx, we have to do that now. Can't let the
226 * timer do it because we have to sync the timer outside the
227 * lock that it takes itself.
228 */
229 if (tid_rx) {
230 del_timer_sync(&tid_rx->session_timer);
231 kfree(tid_rx);
232 }
233
234 /*
235 * No need to do such complications for TX agg sessions, the
236 * path leading to freeing the tid_tx struct goes via a call
237 * from the driver, and thus needs to look up the sta struct
238 * again, which cannot be found when we get here. Hence, we
239 * just need to delete the timer and free the aggregation
240 * info; we won't be telling the peer about it then but that
241 * doesn't matter if we're not talking to it again anyway.
242 */
243 tid_tx = sta->ampdu_mlme.tid_tx[i];
244 if (tid_tx) {
245 del_timer_sync(&tid_tx->addba_resp_timer);
246 /*
247 * STA removed while aggregation session being
248 * started? Bit odd, but purge frames anyway.
249 */
250 skb_queue_purge(&tid_tx->pending);
251 kfree(tid_tx);
252 }
253 }
254
255 __sta_info_free(local, sta);
256}
257
258
259/* Caller must hold local->sta_lock */ 172/* Caller must hold local->sta_lock */
260static void sta_info_hash_add(struct ieee80211_local *local, 173static void sta_info_hash_add(struct ieee80211_local *local,
261 struct sta_info *sta) 174 struct sta_info *sta)
@@ -352,7 +265,93 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
352 return sta; 265 return sta;
353} 266}
354 267
355int sta_info_insert(struct sta_info *sta) 268static int sta_info_finish_insert(struct sta_info *sta, bool async)
269{
270 struct ieee80211_local *local = sta->local;
271 struct ieee80211_sub_if_data *sdata = sta->sdata;
272 struct station_info sinfo;
273 unsigned long flags;
274 int err = 0;
275
276 WARN_ON(!mutex_is_locked(&local->sta_mtx));
277
278 /* notify driver */
279 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
280 sdata = container_of(sdata->bss,
281 struct ieee80211_sub_if_data,
282 u.ap);
283 err = drv_sta_add(local, sdata, &sta->sta);
284 if (err) {
285 if (!async)
286 return err;
287 printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to driver (%d)"
288 " - keeping it anyway.\n",
289 sdata->name, sta->sta.addr, err);
290 } else {
291 sta->uploaded = true;
292#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
293 if (async)
294 printk(KERN_DEBUG "%s: Finished adding IBSS STA %pM\n",
295 wiphy_name(local->hw.wiphy), sta->sta.addr);
296#endif
297 }
298
299 sdata = sta->sdata;
300
301 if (!async) {
302 local->num_sta++;
303 local->sta_generation++;
304 smp_mb();
305
306 /* make the station visible */
307 spin_lock_irqsave(&local->sta_lock, flags);
308 sta_info_hash_add(local, sta);
309 spin_unlock_irqrestore(&local->sta_lock, flags);
310 }
311
312 list_add(&sta->list, &local->sta_list);
313
314 ieee80211_sta_debugfs_add(sta);
315 rate_control_add_sta_debugfs(sta);
316
317 sinfo.filled = 0;
318 sinfo.generation = local->sta_generation;
319 cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
320
321
322 return 0;
323}
324
325static void sta_info_finish_pending(struct ieee80211_local *local)
326{
327 struct sta_info *sta;
328 unsigned long flags;
329
330 spin_lock_irqsave(&local->sta_lock, flags);
331 while (!list_empty(&local->sta_pending_list)) {
332 sta = list_first_entry(&local->sta_pending_list,
333 struct sta_info, list);
334 list_del(&sta->list);
335 spin_unlock_irqrestore(&local->sta_lock, flags);
336
337 sta_info_finish_insert(sta, true);
338
339 spin_lock_irqsave(&local->sta_lock, flags);
340 }
341 spin_unlock_irqrestore(&local->sta_lock, flags);
342}
343
344static void sta_info_finish_work(struct work_struct *work)
345{
346 struct ieee80211_local *local =
347 container_of(work, struct ieee80211_local, sta_finish_work);
348
349 mutex_lock(&local->sta_mtx);
350 sta_info_finish_pending(local);
351 mutex_unlock(&local->sta_mtx);
352}
353
354int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
356{ 355{
357 struct ieee80211_local *local = sta->local; 356 struct ieee80211_local *local = sta->local;
358 struct ieee80211_sub_if_data *sdata = sta->sdata; 357 struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -364,38 +363,90 @@ int sta_info_insert(struct sta_info *sta)
364 * something inserts a STA (on one CPU) without holding the RTNL 363 * something inserts a STA (on one CPU) without holding the RTNL
365 * and another CPU turns off the net device. 364 * and another CPU turns off the net device.
366 */ 365 */
367 if (unlikely(!netif_running(sdata->dev))) { 366 if (unlikely(!ieee80211_sdata_running(sdata))) {
368 err = -ENETDOWN; 367 err = -ENETDOWN;
368 rcu_read_lock();
369 goto out_free; 369 goto out_free;
370 } 370 }
371 371
372 if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->dev->dev_addr) == 0 || 372 if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->vif.addr) == 0 ||
373 is_multicast_ether_addr(sta->sta.addr))) { 373 is_multicast_ether_addr(sta->sta.addr))) {
374 err = -EINVAL; 374 err = -EINVAL;
375 rcu_read_lock();
375 goto out_free; 376 goto out_free;
376 } 377 }
377 378
379 /*
380 * In ad-hoc mode, we sometimes need to insert stations
381 * from tasklet context from the RX path. To avoid races,
382 * always do so in that case -- see the comment below.
383 */
384 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
385 spin_lock_irqsave(&local->sta_lock, flags);
386 /* check if STA exists already */
387 if (sta_info_get_bss(sdata, sta->sta.addr)) {
388 spin_unlock_irqrestore(&local->sta_lock, flags);
389 rcu_read_lock();
390 err = -EEXIST;
391 goto out_free;
392 }
393
394 local->num_sta++;
395 local->sta_generation++;
396 smp_mb();
397 sta_info_hash_add(local, sta);
398
399 list_add_tail(&sta->list, &local->sta_pending_list);
400
401 rcu_read_lock();
402 spin_unlock_irqrestore(&local->sta_lock, flags);
403
404#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
405 printk(KERN_DEBUG "%s: Added IBSS STA %pM\n",
406 wiphy_name(local->hw.wiphy), sta->sta.addr);
407#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
408
409 ieee80211_queue_work(&local->hw, &local->sta_finish_work);
410
411 return 0;
412 }
413
414 /*
415 * On first glance, this will look racy, because the code
416 * below this point, which inserts a station with sleeping,
417 * unlocks the sta_lock between checking existence in the
418 * hash table and inserting into it.
419 *
420 * However, it is not racy against itself because it keeps
421 * the mutex locked. It still seems to race against the
422 * above code that atomically inserts the station... That,
423 * however, is not true because the above code can only
424 * be invoked for IBSS interfaces, and the below code will
425 * not be -- and the two do not race against each other as
426 * the hash table also keys off the interface.
427 */
428
429 might_sleep();
430
431 mutex_lock(&local->sta_mtx);
432
378 spin_lock_irqsave(&local->sta_lock, flags); 433 spin_lock_irqsave(&local->sta_lock, flags);
379 /* check if STA exists already */ 434 /* check if STA exists already */
380 if (sta_info_get(local, sta->sta.addr)) { 435 if (sta_info_get_bss(sdata, sta->sta.addr)) {
381 spin_unlock_irqrestore(&local->sta_lock, flags); 436 spin_unlock_irqrestore(&local->sta_lock, flags);
437 mutex_unlock(&local->sta_mtx);
438 rcu_read_lock();
382 err = -EEXIST; 439 err = -EEXIST;
383 goto out_free; 440 goto out_free;
384 } 441 }
385 list_add(&sta->list, &local->sta_list);
386 local->sta_generation++;
387 local->num_sta++;
388 sta_info_hash_add(local, sta);
389 442
390 /* notify driver */ 443 spin_unlock_irqrestore(&local->sta_lock, flags);
391 if (local->ops->sta_notify) {
392 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
393 sdata = container_of(sdata->bss,
394 struct ieee80211_sub_if_data,
395 u.ap);
396 444
397 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_ADD, &sta->sta); 445 err = sta_info_finish_insert(sta, false);
398 sdata = sta->sdata; 446 if (err) {
447 mutex_unlock(&local->sta_mtx);
448 rcu_read_lock();
449 goto out_free;
399 } 450 }
400 451
401#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 452#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -403,18 +454,9 @@ int sta_info_insert(struct sta_info *sta)
403 wiphy_name(local->hw.wiphy), sta->sta.addr); 454 wiphy_name(local->hw.wiphy), sta->sta.addr);
404#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 455#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
405 456
406 spin_unlock_irqrestore(&local->sta_lock, flags); 457 /* move reference to rcu-protected */
407 458 rcu_read_lock();
408#ifdef CONFIG_MAC80211_DEBUGFS 459 mutex_unlock(&local->sta_mtx);
409 /*
410 * Debugfs entry adding might sleep, so schedule process
411 * context task for adding entry for STAs that do not yet
412 * have one.
413 * NOTE: due to auto-freeing semantics this may only be done
414 * if the insertion is successful!
415 */
416 schedule_work(&local->sta_debugfs_add);
417#endif
418 460
419 if (ieee80211_vif_is_mesh(&sdata->vif)) 461 if (ieee80211_vif_is_mesh(&sdata->vif))
420 mesh_accept_plinks_update(sdata); 462 mesh_accept_plinks_update(sdata);
@@ -426,6 +468,15 @@ int sta_info_insert(struct sta_info *sta)
426 return err; 468 return err;
427} 469}
428 470
471int sta_info_insert(struct sta_info *sta)
472{
473 int err = sta_info_insert_rcu(sta);
474
475 rcu_read_unlock();
476
477 return err;
478}
479
429static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid) 480static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid)
430{ 481{
431 /* 482 /*
@@ -494,108 +545,6 @@ void sta_info_clear_tim_bit(struct sta_info *sta)
494 spin_unlock_irqrestore(&sta->local->sta_lock, flags); 545 spin_unlock_irqrestore(&sta->local->sta_lock, flags);
495} 546}
496 547
497static void __sta_info_unlink(struct sta_info **sta)
498{
499 struct ieee80211_local *local = (*sta)->local;
500 struct ieee80211_sub_if_data *sdata = (*sta)->sdata;
501 /*
502 * pull caller's reference if we're already gone.
503 */
504 if (sta_info_hash_del(local, *sta)) {
505 *sta = NULL;
506 return;
507 }
508
509 if ((*sta)->key) {
510 ieee80211_key_free((*sta)->key);
511 WARN_ON((*sta)->key);
512 }
513
514 list_del(&(*sta)->list);
515 (*sta)->dead = true;
516
517 if (test_and_clear_sta_flags(*sta,
518 WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) {
519 BUG_ON(!sdata->bss);
520
521 atomic_dec(&sdata->bss->num_sta_ps);
522 __sta_info_clear_tim_bit(sdata->bss, *sta);
523 }
524
525 local->num_sta--;
526 local->sta_generation++;
527
528 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
529 rcu_assign_pointer(sdata->u.vlan.sta, NULL);
530
531 if (local->ops->sta_notify) {
532 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
533 sdata = container_of(sdata->bss,
534 struct ieee80211_sub_if_data,
535 u.ap);
536
537 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_REMOVE,
538 &(*sta)->sta);
539 sdata = (*sta)->sdata;
540 }
541
542 if (ieee80211_vif_is_mesh(&sdata->vif)) {
543 mesh_accept_plinks_update(sdata);
544#ifdef CONFIG_MAC80211_MESH
545 del_timer(&(*sta)->plink_timer);
546#endif
547 }
548
549#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
550 printk(KERN_DEBUG "%s: Removed STA %pM\n",
551 wiphy_name(local->hw.wiphy), (*sta)->sta.addr);
552#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
553
554 /*
555 * Finally, pull caller's reference if the STA is pinned by the
556 * task that is adding the debugfs entries. In that case, we
557 * leave the STA "to be freed".
558 *
559 * The rules are not trivial, but not too complex either:
560 * (1) pin_status is only modified under the sta_lock
561 * (2) STAs may only be pinned under the RTNL so that
562 * sta_info_flush() is guaranteed to actually destroy
563 * all STAs that are active for a given interface, this
564 * is required for correctness because otherwise we
565 * could notify a driver that an interface is going
566 * away and only after that (!) notify it about a STA
567 * on that interface going away.
568 * (3) sta_info_debugfs_add_work() will set the status
569 * to PINNED when it found an item that needs a new
570 * debugfs directory created. In that case, that item
571 * must not be freed although all *RCU* users are done
572 * with it. Hence, we tell the caller of _unlink()
573 * that the item is already gone (as can happen when
574 * two tasks try to unlink/destroy at the same time)
575 * (4) We set the pin_status to DESTROY here when we
576 * find such an item.
577 * (5) sta_info_debugfs_add_work() will reset the pin_status
578 * from PINNED to NORMAL when it is done with the item,
579 * but will check for DESTROY before resetting it in
580 * which case it will free the item.
581 */
582 if ((*sta)->pin_status == STA_INFO_PIN_STAT_PINNED) {
583 (*sta)->pin_status = STA_INFO_PIN_STAT_DESTROY;
584 *sta = NULL;
585 return;
586 }
587}
588
589void sta_info_unlink(struct sta_info **sta)
590{
591 struct ieee80211_local *local = (*sta)->local;
592 unsigned long flags;
593
594 spin_lock_irqsave(&local->sta_lock, flags);
595 __sta_info_unlink(sta);
596 spin_unlock_irqrestore(&local->sta_lock, flags);
597}
598
599static int sta_info_buffer_expired(struct sta_info *sta, 548static int sta_info_buffer_expired(struct sta_info *sta,
600 struct sk_buff *skb) 549 struct sk_buff *skb)
601{ 550{
@@ -652,109 +601,209 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
652 } 601 }
653} 602}
654 603
655 604static int __must_check __sta_info_destroy(struct sta_info *sta)
656static void sta_info_cleanup(unsigned long data)
657{ 605{
658 struct ieee80211_local *local = (struct ieee80211_local *) data; 606 struct ieee80211_local *local;
659 struct sta_info *sta; 607 struct ieee80211_sub_if_data *sdata;
608 struct sk_buff *skb;
609 unsigned long flags;
610 int ret, i;
660 611
661 rcu_read_lock(); 612 might_sleep();
662 list_for_each_entry_rcu(sta, &local->sta_list, list)
663 sta_info_cleanup_expire_buffered(local, sta);
664 rcu_read_unlock();
665 613
666 if (local->quiescing) 614 if (!sta)
667 return; 615 return -ENOENT;
668 616
669 local->sta_cleanup.expires = 617 local = sta->local;
670 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); 618 sdata = sta->sdata;
671 add_timer(&local->sta_cleanup);
672}
673 619
674#ifdef CONFIG_MAC80211_DEBUGFS 620 spin_lock_irqsave(&local->sta_lock, flags);
675/* 621 ret = sta_info_hash_del(local, sta);
676 * See comment in __sta_info_unlink, 622 /* this might still be the pending list ... which is fine */
677 * caller must hold local->sta_lock. 623 if (!ret)
678 */ 624 list_del(&sta->list);
679static void __sta_info_pin(struct sta_info *sta) 625 spin_unlock_irqrestore(&local->sta_lock, flags);
680{ 626 if (ret)
681 WARN_ON(sta->pin_status != STA_INFO_PIN_STAT_NORMAL); 627 return ret;
682 sta->pin_status = STA_INFO_PIN_STAT_PINNED; 628
629 if (sta->key) {
630 ieee80211_key_free(sta->key);
631 /*
632 * We have only unlinked the key, and actually destroying it
633 * may mean it is removed from hardware which requires that
634 * the key->sta pointer is still valid, so flush the key todo
635 * list here.
636 *
637 * ieee80211_key_todo() will synchronize_rcu() so after this
638 * nothing can reference this sta struct any more.
639 */
640 ieee80211_key_todo();
641
642 WARN_ON(sta->key);
643 }
644
645 sta->dead = true;
646
647 if (test_and_clear_sta_flags(sta,
648 WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) {
649 BUG_ON(!sdata->bss);
650
651 atomic_dec(&sdata->bss->num_sta_ps);
652 __sta_info_clear_tim_bit(sdata->bss, sta);
653 }
654
655 local->num_sta--;
656 local->sta_generation++;
657
658 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
659 rcu_assign_pointer(sdata->u.vlan.sta, NULL);
660
661 if (sta->uploaded) {
662 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
663 sdata = container_of(sdata->bss,
664 struct ieee80211_sub_if_data,
665 u.ap);
666 drv_sta_remove(local, sdata, &sta->sta);
667 sdata = sta->sdata;
668 }
669
670#ifdef CONFIG_MAC80211_MESH
671 if (ieee80211_vif_is_mesh(&sdata->vif)) {
672 mesh_accept_plinks_update(sdata);
673 del_timer(&sta->plink_timer);
674 }
675#endif
676
677#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
678 printk(KERN_DEBUG "%s: Removed STA %pM\n",
679 wiphy_name(local->hw.wiphy), sta->sta.addr);
680#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
681 cancel_work_sync(&sta->drv_unblock_wk);
682
683 rate_control_remove_sta_debugfs(sta);
684 ieee80211_sta_debugfs_remove(sta);
685
686#ifdef CONFIG_MAC80211_MESH
687 if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
688 mesh_plink_deactivate(sta);
689 del_timer_sync(&sta->plink_timer);
690 }
691#endif
692
693 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
694 local->total_ps_buffered--;
695 dev_kfree_skb_any(skb);
696 }
697
698 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL)
699 dev_kfree_skb_any(skb);
700
701 for (i = 0; i < STA_TID_NUM; i++) {
702 struct tid_ampdu_rx *tid_rx;
703 struct tid_ampdu_tx *tid_tx;
704
705 spin_lock_bh(&sta->lock);
706 tid_rx = sta->ampdu_mlme.tid_rx[i];
707 /* Make sure timer won't free the tid_rx struct, see below */
708 if (tid_rx)
709 tid_rx->shutdown = true;
710
711 spin_unlock_bh(&sta->lock);
712
713 /*
714 * Outside spinlock - shutdown is true now so that the timer
715 * won't free tid_rx, we have to do that now. Can't let the
716 * timer do it because we have to sync the timer outside the
717 * lock that it takes itself.
718 */
719 if (tid_rx) {
720 del_timer_sync(&tid_rx->session_timer);
721 kfree(tid_rx);
722 }
723
724 /*
725 * No need to do such complications for TX agg sessions, the
726 * path leading to freeing the tid_tx struct goes via a call
727 * from the driver, and thus needs to look up the sta struct
728 * again, which cannot be found when we get here. Hence, we
729 * just need to delete the timer and free the aggregation
730 * info; we won't be telling the peer about it then but that
731 * doesn't matter if we're not talking to it again anyway.
732 */
733 tid_tx = sta->ampdu_mlme.tid_tx[i];
734 if (tid_tx) {
735 del_timer_sync(&tid_tx->addba_resp_timer);
736 /*
737 * STA removed while aggregation session being
738 * started? Bit odd, but purge frames anyway.
739 */
740 skb_queue_purge(&tid_tx->pending);
741 kfree(tid_tx);
742 }
743 }
744
745 __sta_info_free(local, sta);
746
747 return 0;
683} 748}
684 749
685/* 750int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr)
686 * See comment in __sta_info_unlink, returns sta if it
687 * needs to be destroyed.
688 */
689static struct sta_info *__sta_info_unpin(struct sta_info *sta)
690{ 751{
691 struct sta_info *ret = NULL; 752 struct sta_info *sta;
692 unsigned long flags; 753 int ret;
693 754
694 spin_lock_irqsave(&sta->local->sta_lock, flags); 755 mutex_lock(&sdata->local->sta_mtx);
695 WARN_ON(sta->pin_status != STA_INFO_PIN_STAT_DESTROY && 756 sta = sta_info_get(sdata, addr);
696 sta->pin_status != STA_INFO_PIN_STAT_PINNED); 757 ret = __sta_info_destroy(sta);
697 if (sta->pin_status == STA_INFO_PIN_STAT_DESTROY) 758 mutex_unlock(&sdata->local->sta_mtx);
698 ret = sta;
699 sta->pin_status = STA_INFO_PIN_STAT_NORMAL;
700 spin_unlock_irqrestore(&sta->local->sta_lock, flags);
701 759
702 return ret; 760 return ret;
703} 761}
704 762
705static void sta_info_debugfs_add_work(struct work_struct *work) 763int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
764 const u8 *addr)
706{ 765{
707 struct ieee80211_local *local = 766 struct sta_info *sta;
708 container_of(work, struct ieee80211_local, sta_debugfs_add); 767 int ret;
709 struct sta_info *sta, *tmp;
710 unsigned long flags;
711 768
712 /* We need to keep the RTNL across the whole pinned status. */ 769 mutex_lock(&sdata->local->sta_mtx);
713 rtnl_lock(); 770 sta = sta_info_get_bss(sdata, addr);
714 while (1) { 771 ret = __sta_info_destroy(sta);
715 sta = NULL; 772 mutex_unlock(&sdata->local->sta_mtx);
716 773
717 spin_lock_irqsave(&local->sta_lock, flags); 774 return ret;
718 list_for_each_entry(tmp, &local->sta_list, list) { 775}
719 /*
720 * debugfs.add_has_run will be set by
721 * ieee80211_sta_debugfs_add regardless
722 * of what else it does.
723 */
724 if (!tmp->debugfs.add_has_run) {
725 sta = tmp;
726 __sta_info_pin(sta);
727 break;
728 }
729 }
730 spin_unlock_irqrestore(&local->sta_lock, flags);
731 776
732 if (!sta) 777static void sta_info_cleanup(unsigned long data)
733 break; 778{
779 struct ieee80211_local *local = (struct ieee80211_local *) data;
780 struct sta_info *sta;
734 781
735 ieee80211_sta_debugfs_add(sta); 782 rcu_read_lock();
736 rate_control_add_sta_debugfs(sta); 783 list_for_each_entry_rcu(sta, &local->sta_list, list)
784 sta_info_cleanup_expire_buffered(local, sta);
785 rcu_read_unlock();
737 786
738 sta = __sta_info_unpin(sta); 787 if (local->quiescing)
739 sta_info_destroy(sta); 788 return;
740 } 789
741 rtnl_unlock(); 790 local->sta_cleanup.expires =
791 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
792 add_timer(&local->sta_cleanup);
742} 793}
743#endif
744 794
745void sta_info_init(struct ieee80211_local *local) 795void sta_info_init(struct ieee80211_local *local)
746{ 796{
747 spin_lock_init(&local->sta_lock); 797 spin_lock_init(&local->sta_lock);
798 mutex_init(&local->sta_mtx);
748 INIT_LIST_HEAD(&local->sta_list); 799 INIT_LIST_HEAD(&local->sta_list);
800 INIT_LIST_HEAD(&local->sta_pending_list);
801 INIT_WORK(&local->sta_finish_work, sta_info_finish_work);
749 802
750 setup_timer(&local->sta_cleanup, sta_info_cleanup, 803 setup_timer(&local->sta_cleanup, sta_info_cleanup,
751 (unsigned long)local); 804 (unsigned long)local);
752 local->sta_cleanup.expires = 805 local->sta_cleanup.expires =
753 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); 806 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
754
755#ifdef CONFIG_MAC80211_DEBUGFS
756 INIT_WORK(&local->sta_debugfs_add, sta_info_debugfs_add_work);
757#endif
758} 807}
759 808
760int sta_info_start(struct ieee80211_local *local) 809int sta_info_start(struct ieee80211_local *local)
@@ -766,16 +815,6 @@ int sta_info_start(struct ieee80211_local *local)
766void sta_info_stop(struct ieee80211_local *local) 815void sta_info_stop(struct ieee80211_local *local)
767{ 816{
768 del_timer(&local->sta_cleanup); 817 del_timer(&local->sta_cleanup);
769#ifdef CONFIG_MAC80211_DEBUGFS
770 /*
771 * Make sure the debugfs adding work isn't pending after this
772 * because we're about to be destroyed. It doesn't matter
773 * whether it ran or not since we're going to flush all STAs
774 * anyway.
775 */
776 cancel_work_sync(&local->sta_debugfs_add);
777#endif
778
779 sta_info_flush(local, NULL); 818 sta_info_flush(local, NULL);
780} 819}
781 820
@@ -791,26 +830,19 @@ int sta_info_flush(struct ieee80211_local *local,
791 struct ieee80211_sub_if_data *sdata) 830 struct ieee80211_sub_if_data *sdata)
792{ 831{
793 struct sta_info *sta, *tmp; 832 struct sta_info *sta, *tmp;
794 LIST_HEAD(tmp_list);
795 int ret = 0; 833 int ret = 0;
796 unsigned long flags;
797 834
798 might_sleep(); 835 might_sleep();
799 836
800 spin_lock_irqsave(&local->sta_lock, flags); 837 mutex_lock(&local->sta_mtx);
838
839 sta_info_finish_pending(local);
840
801 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { 841 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
802 if (!sdata || sdata == sta->sdata) { 842 if (!sdata || sdata == sta->sdata)
803 __sta_info_unlink(&sta); 843 WARN_ON(__sta_info_destroy(sta));
804 if (sta) {
805 list_add_tail(&sta->list, &tmp_list);
806 ret++;
807 }
808 }
809 } 844 }
810 spin_unlock_irqrestore(&local->sta_lock, flags); 845 mutex_unlock(&local->sta_mtx);
811
812 list_for_each_entry_safe(sta, tmp, &tmp_list, list)
813 sta_info_destroy(sta);
814 846
815 return ret; 847 return ret;
816} 848}
@@ -820,34 +852,28 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
820{ 852{
821 struct ieee80211_local *local = sdata->local; 853 struct ieee80211_local *local = sdata->local;
822 struct sta_info *sta, *tmp; 854 struct sta_info *sta, *tmp;
823 LIST_HEAD(tmp_list);
824 unsigned long flags;
825 855
826 spin_lock_irqsave(&local->sta_lock, flags); 856 mutex_lock(&local->sta_mtx);
827 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) 857 list_for_each_entry_safe(sta, tmp, &local->sta_list, list)
828 if (time_after(jiffies, sta->last_rx + exp_time)) { 858 if (time_after(jiffies, sta->last_rx + exp_time)) {
829#ifdef CONFIG_MAC80211_IBSS_DEBUG 859#ifdef CONFIG_MAC80211_IBSS_DEBUG
830 printk(KERN_DEBUG "%s: expiring inactive STA %pM\n", 860 printk(KERN_DEBUG "%s: expiring inactive STA %pM\n",
831 sdata->dev->name, sta->sta.addr); 861 sdata->name, sta->sta.addr);
832#endif 862#endif
833 __sta_info_unlink(&sta); 863 WARN_ON(__sta_info_destroy(sta));
834 if (sta)
835 list_add(&sta->list, &tmp_list);
836 } 864 }
837 spin_unlock_irqrestore(&local->sta_lock, flags); 865 mutex_unlock(&local->sta_mtx);
838
839 list_for_each_entry_safe(sta, tmp, &tmp_list, list)
840 sta_info_destroy(sta);
841} 866}
842 867
843struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw, 868struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw,
844 const u8 *addr) 869 const u8 *addr)
845{ 870{
846 struct sta_info *sta = sta_info_get(hw_to_local(hw), addr); 871 struct sta_info *sta, *nxt;
847 872
848 if (!sta) 873 /* Just return a random station ... first in list ... */
849 return NULL; 874 for_each_sta_info(hw_to_local(hw), addr, sta, nxt)
850 return &sta->sta; 875 return &sta->sta;
876 return NULL;
851} 877}
852EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw); 878EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw);
853 879
@@ -872,7 +898,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
872 struct ieee80211_local *local = sdata->local; 898 struct ieee80211_local *local = sdata->local;
873 int sent, buffered; 899 int sent, buffered;
874 900
875 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_AWAKE, &sta->sta); 901 drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
876 902
877 if (!skb_queue_empty(&sta->ps_tx_buf)) 903 if (!skb_queue_empty(&sta->ps_tx_buf))
878 sta_info_clear_tim_bit(sta); 904 sta_info_clear_tim_bit(sta);
@@ -885,7 +911,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
885 911
886#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 912#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
887 printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames " 913 printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames "
888 "since STA not sleeping anymore\n", sdata->dev->name, 914 "since STA not sleeping anymore\n", sdata->name,
889 sta->sta.addr, sta->sta.aid, sent - buffered, buffered); 915 sta->sta.addr, sta->sta.aid, sent - buffered, buffered);
890#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 916#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
891} 917}
@@ -944,7 +970,7 @@ void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta)
944 */ 970 */
945 printk(KERN_DEBUG "%s: STA %pM sent PS Poll even " 971 printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
946 "though there are no buffered frames for it\n", 972 "though there are no buffered frames for it\n",
947 sdata->dev->name, sta->sta.addr); 973 sdata->name, sta->sta.addr);
948#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 974#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
949 } 975 }
950} 976}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index b4810f6aa94f..822d84522937 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -42,6 +42,9 @@
42 * be in the queues 42 * be in the queues
43 * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping 43 * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping
44 * station in power-save mode, reply when the driver unblocks. 44 * station in power-save mode, reply when the driver unblocks.
45 * @WLAN_STA_DISASSOC: Disassociation in progress.
46 * This is used to reject TX BA session requests when disassociation
47 * is in progress.
45 */ 48 */
46enum ieee80211_sta_info_flags { 49enum ieee80211_sta_info_flags {
47 WLAN_STA_AUTH = 1<<0, 50 WLAN_STA_AUTH = 1<<0,
@@ -57,6 +60,7 @@ enum ieee80211_sta_info_flags {
57 WLAN_STA_SUSPEND = 1<<11, 60 WLAN_STA_SUSPEND = 1<<11,
58 WLAN_STA_PS_DRIVER = 1<<12, 61 WLAN_STA_PS_DRIVER = 1<<12,
59 WLAN_STA_PSPOLL = 1<<13, 62 WLAN_STA_PSPOLL = 1<<13,
63 WLAN_STA_DISASSOC = 1<<14,
60}; 64};
61 65
62#define STA_TID_NUM 16 66#define STA_TID_NUM 16
@@ -162,11 +166,6 @@ struct sta_ampdu_mlme {
162}; 166};
163 167
164 168
165/* see __sta_info_unlink */
166#define STA_INFO_PIN_STAT_NORMAL 0
167#define STA_INFO_PIN_STAT_PINNED 1
168#define STA_INFO_PIN_STAT_DESTROY 2
169
170/** 169/**
171 * struct sta_info - STA information 170 * struct sta_info - STA information
172 * 171 *
@@ -187,7 +186,6 @@ struct sta_ampdu_mlme {
187 * @flaglock: spinlock for flags accesses 186 * @flaglock: spinlock for flags accesses
188 * @drv_unblock_wk: used for driver PS unblocking 187 * @drv_unblock_wk: used for driver PS unblocking
189 * @listen_interval: listen interval of this station, when we're acting as AP 188 * @listen_interval: listen interval of this station, when we're acting as AP
190 * @pin_status: used internally for pinning a STA struct into memory
191 * @flags: STA flags, see &enum ieee80211_sta_info_flags 189 * @flags: STA flags, see &enum ieee80211_sta_info_flags
192 * @ps_tx_buf: buffer of frames to transmit to this station 190 * @ps_tx_buf: buffer of frames to transmit to this station
193 * when it leaves power saving state 191 * when it leaves power saving state
@@ -226,6 +224,7 @@ struct sta_ampdu_mlme {
226 * @debugfs: debug filesystem info 224 * @debugfs: debug filesystem info
227 * @sta: station information we share with the driver 225 * @sta: station information we share with the driver
228 * @dead: set to true when sta is unlinked 226 * @dead: set to true when sta is unlinked
227 * @uploaded: set to true when sta is uploaded to the driver
229 */ 228 */
230struct sta_info { 229struct sta_info {
231 /* General information, mostly static */ 230 /* General information, mostly static */
@@ -245,11 +244,7 @@ struct sta_info {
245 244
246 bool dead; 245 bool dead;
247 246
248 /* 247 bool uploaded;
249 * for use by the internal lifetime management,
250 * see __sta_info_unlink
251 */
252 u8 pin_status;
253 248
254 /* 249 /*
255 * frequently updated, locked with own spinlock (flaglock), 250 * frequently updated, locked with own spinlock (flaglock),
@@ -403,9 +398,37 @@ static inline u32 get_sta_flags(struct sta_info *sta)
403#define STA_INFO_CLEANUP_INTERVAL (10 * HZ) 398#define STA_INFO_CLEANUP_INTERVAL (10 * HZ)
404 399
405/* 400/*
406 * Get a STA info, must have be under RCU read lock. 401 * Get a STA info, must be under RCU read lock.
407 */ 402 */
408struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr); 403struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
404 const u8 *addr);
405
406struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
407 const u8 *addr);
408
409static inline
410void for_each_sta_info_type_check(struct ieee80211_local *local,
411 const u8 *addr,
412 struct sta_info *sta,
413 struct sta_info *nxt)
414{
415}
416
417#define for_each_sta_info(local, _addr, sta, nxt) \
418 for ( /* initialise loop */ \
419 sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
420 nxt = sta ? rcu_dereference(sta->hnext) : NULL; \
421 /* typecheck */ \
422 for_each_sta_info_type_check(local, (_addr), sta, nxt), \
423 /* continue condition */ \
424 sta; \
425 /* advance loop */ \
426 sta = nxt, \
427 nxt = sta ? rcu_dereference(sta->hnext) : NULL \
428 ) \
429 /* compare address and run code only if it matches */ \
430 if (memcmp(sta->sta.addr, (_addr), ETH_ALEN) == 0)
431
409/* 432/*
410 * Get STA info by index, BROKEN! 433 * Get STA info by index, BROKEN!
411 */ 434 */
@@ -421,18 +444,19 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
421 * Insert STA info into hash table/list, returns zero or a 444 * Insert STA info into hash table/list, returns zero or a
422 * -EEXIST if (if the same MAC address is already present). 445 * -EEXIST if (if the same MAC address is already present).
423 * 446 *
424 * Calling this without RCU protection makes the caller 447 * Calling the non-rcu version makes the caller relinquish,
425 * relinquish its reference to @sta. 448 * the _rcu version calls read_lock_rcu() and must be called
449 * without it held.
426 */ 450 */
427int sta_info_insert(struct sta_info *sta); 451int sta_info_insert(struct sta_info *sta);
428/* 452int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU);
429 * Unlink a STA info from the hash table/list. 453int sta_info_insert_atomic(struct sta_info *sta);
430 * This can NULL the STA pointer if somebody else 454
431 * has already unlinked it. 455int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata,
432 */ 456 const u8 *addr);
433void sta_info_unlink(struct sta_info **sta); 457int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
458 const u8 *addr);
434 459
435void sta_info_destroy(struct sta_info *sta);
436void sta_info_set_tim_bit(struct sta_info *sta); 460void sta_info_set_tim_bit(struct sta_info *sta);
437void sta_info_clear_tim_bit(struct sta_info *sta); 461void sta_info_clear_tim_bit(struct sta_info *sta);
438 462
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index d78f36c64c7b..56d5b9a6ec5b 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -2,7 +2,7 @@
2 * Copyright 2002-2005, Instant802 Networks, Inc. 2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc. 3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2008-2009 Johannes Berg <johannes@sipsolutions.net> 5 * Copyright 2008-2010 Johannes Berg <johannes@sipsolutions.net>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -45,29 +45,19 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
45 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 45 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
46 46
47 /* 47 /*
48 * XXX: This is temporary! 48 * This skb 'survived' a round-trip through the driver, and
49 * 49 * hopefully the driver didn't mangle it too badly. However,
50 * The problem here is that when we get here, the driver will 50 * we can definitely not rely on the the control information
51 * quite likely have pretty much overwritten info->control by 51 * being correct. Clear it so we don't get junk there, and
52 * using info->driver_data or info->rate_driver_data. Thus, 52 * indicate that it needs new processing, but must not be
53 * when passing out the frame to the driver again, we would be 53 * modified/encrypted again.
54 * passing completely bogus data since the driver would then
55 * expect a properly filled info->control. In mac80211 itself
56 * the same problem occurs, since we need info->control.vif
57 * internally.
58 *
59 * To fix this, we should send the frame through TX processing
60 * again. However, it's not that simple, since the frame will
61 * have been software-encrypted (if applicable) already, and
62 * encrypting it again doesn't do much good. So to properly do
63 * that, we not only have to skip the actual 'raw' encryption
64 * (key selection etc. still has to be done!) but also the
65 * sequence number assignment since that impacts the crypto
66 * encapsulation, of course.
67 *
68 * Hence, for now, fix the bug by just dropping the frame.
69 */ 54 */
70 goto drop; 55 memset(&info->control, 0, sizeof(info->control));
56
57 info->control.jiffies = jiffies;
58 info->control.vif = &sta->sdata->vif;
59 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING |
60 IEEE80211_TX_INTFL_RETRANSMISSION;
71 61
72 sta->tx_filtered_count++; 62 sta->tx_filtered_count++;
73 63
@@ -122,7 +112,6 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
122 return; 112 return;
123 } 113 }
124 114
125 drop:
126#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 115#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
127 if (net_ratelimit()) 116 if (net_ratelimit())
128 printk(KERN_DEBUG "%s: dropped TX filtered frame, " 117 printk(KERN_DEBUG "%s: dropped TX filtered frame, "
@@ -134,6 +123,40 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
134 dev_kfree_skb(skb); 123 dev_kfree_skb(skb);
135} 124}
136 125
126static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
127{
128 struct ieee80211_mgmt *mgmt = (void *) skb->data;
129 struct ieee80211_local *local = sta->local;
130 struct ieee80211_sub_if_data *sdata = sta->sdata;
131
132 if (ieee80211_is_action(mgmt->frame_control) &&
133 sdata->vif.type == NL80211_IFTYPE_STATION &&
134 mgmt->u.action.category == WLAN_CATEGORY_HT &&
135 mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS) {
136 /*
137 * This update looks racy, but isn't -- if we come
138 * here we've definitely got a station that we're
139 * talking to, and on a managed interface that can
140 * only be the AP. And the only other place updating
141 * this variable is before we're associated.
142 */
143 switch (mgmt->u.action.u.ht_smps.smps_control) {
144 case WLAN_HT_SMPS_CONTROL_DYNAMIC:
145 sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_DYNAMIC;
146 break;
147 case WLAN_HT_SMPS_CONTROL_STATIC:
148 sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_STATIC;
149 break;
150 case WLAN_HT_SMPS_CONTROL_DISABLED:
151 default: /* shouldn't happen since we don't send that */
152 sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_OFF;
153 break;
154 }
155
156 ieee80211_queue_work(&local->hw, &local->recalc_smps);
157 }
158}
159
137void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) 160void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
138{ 161{
139 struct sk_buff *skb2; 162 struct sk_buff *skb2;
@@ -146,7 +169,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
146 struct ieee80211_tx_status_rtap_hdr *rthdr; 169 struct ieee80211_tx_status_rtap_hdr *rthdr;
147 struct ieee80211_sub_if_data *sdata; 170 struct ieee80211_sub_if_data *sdata;
148 struct net_device *prev_dev = NULL; 171 struct net_device *prev_dev = NULL;
149 struct sta_info *sta; 172 struct sta_info *sta, *tmp;
150 int retry_count = -1, i; 173 int retry_count = -1, i;
151 bool injected; 174 bool injected;
152 175
@@ -165,10 +188,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
165 rcu_read_lock(); 188 rcu_read_lock();
166 189
167 sband = local->hw.wiphy->bands[info->band]; 190 sband = local->hw.wiphy->bands[info->band];
191 fc = hdr->frame_control;
168 192
169 sta = sta_info_get(local, hdr->addr1); 193 for_each_sta_info(local, hdr->addr1, sta, tmp) {
194 /* skip wrong virtual interface */
195 if (memcmp(hdr->addr2, sta->sdata->vif.addr, ETH_ALEN))
196 continue;
170 197
171 if (sta) {
172 if (!(info->flags & IEEE80211_TX_STAT_ACK) && 198 if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
173 test_sta_flags(sta, WLAN_STA_PS_STA)) { 199 test_sta_flags(sta, WLAN_STA_PS_STA)) {
174 /* 200 /*
@@ -180,8 +206,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
180 return; 206 return;
181 } 207 }
182 208
183 fc = hdr->frame_control;
184
185 if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) && 209 if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
186 (ieee80211_is_data_qos(fc))) { 210 (ieee80211_is_data_qos(fc))) {
187 u16 tid, ssn; 211 u16 tid, ssn;
@@ -208,6 +232,10 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
208 rate_control_tx_status(local, sband, sta, skb); 232 rate_control_tx_status(local, sband, sta, skb);
209 if (ieee80211_vif_is_mesh(&sta->sdata->vif)) 233 if (ieee80211_vif_is_mesh(&sta->sdata->vif))
210 ieee80211s_update_metric(local, sta, skb); 234 ieee80211s_update_metric(local, sta, skb);
235
236 if (!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
237 (info->flags & IEEE80211_TX_STAT_ACK))
238 ieee80211_frame_acked(sta, skb);
211 } 239 }
212 240
213 rcu_read_unlock(); 241 rcu_read_unlock();
@@ -246,6 +274,25 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
246 local->dot11FailedCount++; 274 local->dot11FailedCount++;
247 } 275 }
248 276
277 if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
278 (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
279 !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
280 local->ps_sdata && !(local->scanning)) {
281 if (info->flags & IEEE80211_TX_STAT_ACK) {
282 local->ps_sdata->u.mgd.flags |=
283 IEEE80211_STA_NULLFUNC_ACKED;
284 ieee80211_queue_work(&local->hw,
285 &local->dynamic_ps_enable_work);
286 } else
287 mod_timer(&local->dynamic_ps_timer, jiffies +
288 msecs_to_jiffies(10));
289 }
290
291 if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX)
292 cfg80211_action_tx_status(
293 skb->dev, (unsigned long) skb, skb->data, skb->len,
294 !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
295
249 /* this was a transmitted frame, but now we want to reuse it */ 296 /* this was a transmitted frame, but now we want to reuse it */
250 skb_orphan(skb); 297 skb_orphan(skb);
251 298
@@ -311,7 +358,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
311 rcu_read_lock(); 358 rcu_read_lock();
312 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 359 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
313 if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { 360 if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
314 if (!netif_running(sdata->dev)) 361 if (!ieee80211_sdata_running(sdata))
315 continue; 362 continue;
316 363
317 if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) && 364 if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 4921d724b6c7..7ef491e9d66d 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -100,7 +100,7 @@ static void tkip_mixing_phase1(const u8 *tk, struct tkip_ctx *ctx,
100 p1k[3] += tkipS(p1k[2] ^ get_unaligned_le16(tk + 12 + j)); 100 p1k[3] += tkipS(p1k[2] ^ get_unaligned_le16(tk + 12 + j));
101 p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i; 101 p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i;
102 } 102 }
103 ctx->initialized = 1; 103 ctx->state = TKIP_STATE_PHASE1_DONE;
104} 104}
105 105
106static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx, 106static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx,
@@ -183,7 +183,7 @@ void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf,
183 /* Update the p1k only when the iv16 in the packet wraps around, this 183 /* Update the p1k only when the iv16 in the packet wraps around, this
184 * might occur after the wrap around of iv16 in the key in case of 184 * might occur after the wrap around of iv16 in the key in case of
185 * fragmented packets. */ 185 * fragmented packets. */
186 if (iv16 == 0 || !ctx->initialized) 186 if (iv16 == 0 || ctx->state == TKIP_STATE_NOT_INIT)
187 tkip_mixing_phase1(tk, ctx, hdr->addr2, iv32); 187 tkip_mixing_phase1(tk, ctx, hdr->addr2, iv32);
188 188
189 if (type == IEEE80211_TKIP_P1_KEY) { 189 if (type == IEEE80211_TKIP_P1_KEY) {
@@ -195,11 +195,13 @@ void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf,
195} 195}
196EXPORT_SYMBOL(ieee80211_get_tkip_key); 196EXPORT_SYMBOL(ieee80211_get_tkip_key);
197 197
198/* Encrypt packet payload with TKIP using @key. @pos is a pointer to the 198/*
199 * Encrypt packet payload with TKIP using @key. @pos is a pointer to the
199 * beginning of the buffer containing payload. This payload must include 200 * beginning of the buffer containing payload. This payload must include
200 * headroom of eight octets for IV and Ext. IV and taildroom of four octets 201 * the IV/Ext.IV and space for (taildroom) four octets for ICV.
201 * for ICV. @payload_len is the length of payload (_not_ including extra 202 * @payload_len is the length of payload (_not_ including IV/ICV length).
202 * headroom and tailroom). @ta is the transmitter addresses. */ 203 * @ta is the transmitter addresses.
204 */
203void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, 205void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
204 struct ieee80211_key *key, 206 struct ieee80211_key *key,
205 u8 *pos, size_t payload_len, u8 *ta) 207 u8 *pos, size_t payload_len, u8 *ta)
@@ -209,12 +211,11 @@ void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
209 const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; 211 const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
210 212
211 /* Calculate per-packet key */ 213 /* Calculate per-packet key */
212 if (ctx->iv16 == 0 || !ctx->initialized) 214 if (ctx->iv16 == 0 || ctx->state == TKIP_STATE_NOT_INIT)
213 tkip_mixing_phase1(tk, ctx, ta, ctx->iv32); 215 tkip_mixing_phase1(tk, ctx, ta, ctx->iv32);
214 216
215 tkip_mixing_phase2(tk, ctx, ctx->iv16, rc4key); 217 tkip_mixing_phase2(tk, ctx, ctx->iv16, rc4key);
216 218
217 pos = ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16);
218 ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len); 219 ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len);
219} 220}
220 221
@@ -259,7 +260,7 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
259 if ((keyid >> 6) != key->conf.keyidx) 260 if ((keyid >> 6) != key->conf.keyidx)
260 return TKIP_DECRYPT_INVALID_KEYIDX; 261 return TKIP_DECRYPT_INVALID_KEYIDX;
261 262
262 if (key->u.tkip.rx[queue].initialized && 263 if (key->u.tkip.rx[queue].state != TKIP_STATE_NOT_INIT &&
263 (iv32 < key->u.tkip.rx[queue].iv32 || 264 (iv32 < key->u.tkip.rx[queue].iv32 ||
264 (iv32 == key->u.tkip.rx[queue].iv32 && 265 (iv32 == key->u.tkip.rx[queue].iv32 &&
265 iv16 <= key->u.tkip.rx[queue].iv16))) { 266 iv16 <= key->u.tkip.rx[queue].iv16))) {
@@ -275,11 +276,11 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
275 276
276 if (only_iv) { 277 if (only_iv) {
277 res = TKIP_DECRYPT_OK; 278 res = TKIP_DECRYPT_OK;
278 key->u.tkip.rx[queue].initialized = 1; 279 key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED;
279 goto done; 280 goto done;
280 } 281 }
281 282
282 if (!key->u.tkip.rx[queue].initialized || 283 if (key->u.tkip.rx[queue].state == TKIP_STATE_NOT_INIT ||
283 key->u.tkip.rx[queue].iv32 != iv32) { 284 key->u.tkip.rx[queue].iv32 != iv32) {
284 /* IV16 wrapped around - perform TKIP phase 1 */ 285 /* IV16 wrapped around - perform TKIP phase 1 */
285 tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32); 286 tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32);
@@ -299,18 +300,18 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
299 printk("\n"); 300 printk("\n");
300 } 301 }
301#endif 302#endif
302 if (key->local->ops->update_tkip_key && 303 }
303 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 304 if (key->local->ops->update_tkip_key &&
304 static const u8 bcast[ETH_ALEN] = 305 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
305 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 306 key->u.tkip.rx[queue].state != TKIP_STATE_PHASE1_HW_UPLOADED) {
306 const u8 *sta_addr = key->sta->sta.addr; 307 struct ieee80211_sub_if_data *sdata = key->sdata;
307 308
308 if (is_multicast_ether_addr(ra)) 309 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
309 sta_addr = bcast; 310 sdata = container_of(key->sdata->bss,
310 311 struct ieee80211_sub_if_data, u.ap);
311 drv_update_tkip_key(key->local, &key->conf, sta_addr, 312 drv_update_tkip_key(key->local, sdata, &key->conf, key->sta,
312 iv32, key->u.tkip.rx[queue].p1k); 313 iv32, key->u.tkip.rx[queue].p1k);
313 } 314 key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED;
314 } 315 }
315 316
316 tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key); 317 tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 8834cc93c716..cbe53ed4fb0b 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -180,6 +180,71 @@ static int inline is_ieee80211_device(struct ieee80211_local *local,
180} 180}
181 181
182/* tx handlers */ 182/* tx handlers */
183static ieee80211_tx_result debug_noinline
184ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
185{
186 struct ieee80211_local *local = tx->local;
187 struct ieee80211_if_managed *ifmgd;
188
189 /* driver doesn't support power save */
190 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
191 return TX_CONTINUE;
192
193 /* hardware does dynamic power save */
194 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
195 return TX_CONTINUE;
196
197 /* dynamic power save disabled */
198 if (local->hw.conf.dynamic_ps_timeout <= 0)
199 return TX_CONTINUE;
200
201 /* we are scanning, don't enable power save */
202 if (local->scanning)
203 return TX_CONTINUE;
204
205 if (!local->ps_sdata)
206 return TX_CONTINUE;
207
208 /* No point if we're going to suspend */
209 if (local->quiescing)
210 return TX_CONTINUE;
211
212 /* dynamic ps is supported only in managed mode */
213 if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
214 return TX_CONTINUE;
215
216 ifmgd = &tx->sdata->u.mgd;
217
218 /*
219 * Don't wakeup from power save if u-apsd is enabled, voip ac has
220 * u-apsd enabled and the frame is in voip class. This effectively
221 * means that even if all access categories have u-apsd enabled, in
222 * practise u-apsd is only used with the voip ac. This is a
223 * workaround for the case when received voip class packets do not
224 * have correct qos tag for some reason, due the network or the
225 * peer application.
226 *
227 * Note: local->uapsd_queues access is racy here. If the value is
228 * changed via debugfs, user needs to reassociate manually to have
229 * everything in sync.
230 */
231 if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
232 && (local->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
233 && skb_get_queue_mapping(tx->skb) == 0)
234 return TX_CONTINUE;
235
236 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
237 ieee80211_stop_queues_by_reason(&local->hw,
238 IEEE80211_QUEUE_STOP_REASON_PS);
239 ieee80211_queue_work(&local->hw,
240 &local->dynamic_ps_disable_work);
241 }
242
243 mod_timer(&local->dynamic_ps_timer, jiffies +
244 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
245
246 return TX_CONTINUE;
247}
183 248
184static ieee80211_tx_result debug_noinline 249static ieee80211_tx_result debug_noinline
185ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) 250ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
@@ -223,7 +288,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
223#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 288#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
224 printk(KERN_DEBUG "%s: dropped data frame to not " 289 printk(KERN_DEBUG "%s: dropped data frame to not "
225 "associated station %pM\n", 290 "associated station %pM\n",
226 tx->dev->name, hdr->addr1); 291 tx->sdata->name, hdr->addr1);
227#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 292#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
228 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc); 293 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
229 return TX_DROP; 294 return TX_DROP;
@@ -331,7 +396,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
331#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 396#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
332 if (net_ratelimit()) 397 if (net_ratelimit())
333 printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n", 398 printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n",
334 tx->dev->name); 399 tx->sdata->name);
335#endif 400#endif
336 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); 401 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
337 } else 402 } else
@@ -391,7 +456,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
391 if (net_ratelimit()) { 456 if (net_ratelimit()) {
392 printk(KERN_DEBUG "%s: STA %pM TX " 457 printk(KERN_DEBUG "%s: STA %pM TX "
393 "buffer full - dropping oldest frame\n", 458 "buffer full - dropping oldest frame\n",
394 tx->dev->name, sta->sta.addr); 459 tx->sdata->name, sta->sta.addr);
395 } 460 }
396#endif 461#endif
397 dev_kfree_skb(old); 462 dev_kfree_skb(old);
@@ -416,7 +481,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
416#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 481#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
417 else if (unlikely(staflags & WLAN_STA_PS_STA)) { 482 else if (unlikely(staflags & WLAN_STA_PS_STA)) {
418 printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll " 483 printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll "
419 "set -> send frame\n", tx->dev->name, 484 "set -> send frame\n", tx->sdata->name,
420 sta->sta.addr); 485 sta->sta.addr);
421 } 486 }
422#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 487#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
@@ -464,6 +529,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
464 tx->key = NULL; 529 tx->key = NULL;
465 530
466 if (tx->key) { 531 if (tx->key) {
532 bool skip_hw = false;
533
467 tx->key->tx_rx_count++; 534 tx->key->tx_rx_count++;
468 /* TODO: add threshold stuff again */ 535 /* TODO: add threshold stuff again */
469 536
@@ -480,16 +547,32 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
480 !ieee80211_use_mfp(hdr->frame_control, tx->sta, 547 !ieee80211_use_mfp(hdr->frame_control, tx->sta,
481 tx->skb)) 548 tx->skb))
482 tx->key = NULL; 549 tx->key = NULL;
550 else
551 skip_hw = (tx->key->conf.flags &
552 IEEE80211_KEY_FLAG_SW_MGMT) &&
553 ieee80211_is_mgmt(hdr->frame_control);
483 break; 554 break;
484 case ALG_AES_CMAC: 555 case ALG_AES_CMAC:
485 if (!ieee80211_is_mgmt(hdr->frame_control)) 556 if (!ieee80211_is_mgmt(hdr->frame_control))
486 tx->key = NULL; 557 tx->key = NULL;
487 break; 558 break;
488 } 559 }
560
561 if (!skip_hw && tx->key &&
562 tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
563 info->control.hw_key = &tx->key->conf;
489 } 564 }
490 565
491 if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) 566 return TX_CONTINUE;
492 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 567}
568
569static ieee80211_tx_result debug_noinline
570ieee80211_tx_h_sta(struct ieee80211_tx_data *tx)
571{
572 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
573
574 if (tx->sta && tx->sta->uploaded)
575 info->control.sta = &tx->sta->sta;
493 576
494 return TX_CONTINUE; 577 return TX_CONTINUE;
495} 578}
@@ -519,7 +602,12 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
519 txrc.bss_conf = &tx->sdata->vif.bss_conf; 602 txrc.bss_conf = &tx->sdata->vif.bss_conf;
520 txrc.skb = tx->skb; 603 txrc.skb = tx->skb;
521 txrc.reported_rate.idx = -1; 604 txrc.reported_rate.idx = -1;
522 txrc.max_rate_idx = tx->sdata->max_ratectrl_rateidx; 605 txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[tx->channel->band];
606 if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
607 txrc.max_rate_idx = -1;
608 else
609 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
610 txrc.ap = tx->sdata->vif.type == NL80211_IFTYPE_AP;
523 611
524 /* set up RTS protection if desired */ 612 /* set up RTS protection if desired */
525 if (len > tx->local->hw.wiphy->rts_threshold) { 613 if (len > tx->local->hw.wiphy->rts_threshold) {
@@ -549,7 +637,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
549 "%s: Dropped data frame as no usable bitrate found while " 637 "%s: Dropped data frame as no usable bitrate found while "
550 "scanning and associated. Target station: " 638 "scanning and associated. Target station: "
551 "%pM on %d GHz band\n", 639 "%pM on %d GHz band\n",
552 tx->dev->name, hdr->addr1, 640 tx->sdata->name, hdr->addr1,
553 tx->channel->band ? 5 : 2)) 641 tx->channel->band ? 5 : 2))
554 return TX_DROP; 642 return TX_DROP;
555 643
@@ -664,17 +752,6 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
664} 752}
665 753
666static ieee80211_tx_result debug_noinline 754static ieee80211_tx_result debug_noinline
667ieee80211_tx_h_misc(struct ieee80211_tx_data *tx)
668{
669 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
670
671 if (tx->sta)
672 info->control.sta = &tx->sta->sta;
673
674 return TX_CONTINUE;
675}
676
677static ieee80211_tx_result debug_noinline
678ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) 755ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
679{ 756{
680 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 757 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
@@ -933,7 +1010,8 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
933 (struct ieee80211_radiotap_header *) skb->data; 1010 (struct ieee80211_radiotap_header *) skb->data;
934 struct ieee80211_supported_band *sband; 1011 struct ieee80211_supported_band *sband;
935 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1012 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
936 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len); 1013 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
1014 NULL);
937 1015
938 sband = tx->local->hw.wiphy->bands[tx->channel->band]; 1016 sband = tx->local->hw.wiphy->bands[tx->channel->band];
939 1017
@@ -969,7 +1047,7 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
969 * because it will be recomputed and added 1047 * because it will be recomputed and added
970 * on transmission 1048 * on transmission
971 */ 1049 */
972 if (skb->len < (iterator.max_length + FCS_LEN)) 1050 if (skb->len < (iterator._max_length + FCS_LEN))
973 return false; 1051 return false;
974 1052
975 skb_trim(skb, skb->len - FCS_LEN); 1053 skb_trim(skb, skb->len - FCS_LEN);
@@ -996,10 +1074,10 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
996 1074
997 /* 1075 /*
998 * remove the radiotap header 1076 * remove the radiotap header
999 * iterator->max_length was sanity-checked against 1077 * iterator->_max_length was sanity-checked against
1000 * skb->len by iterator init 1078 * skb->len by iterator init
1001 */ 1079 */
1002 skb_pull(skb, iterator.max_length); 1080 skb_pull(skb, iterator._max_length);
1003 1081
1004 return true; 1082 return true;
1005} 1083}
@@ -1021,7 +1099,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1021 1099
1022 memset(tx, 0, sizeof(*tx)); 1100 memset(tx, 0, sizeof(*tx));
1023 tx->skb = skb; 1101 tx->skb = skb;
1024 tx->dev = sdata->dev; /* use original interface */
1025 tx->local = local; 1102 tx->local = local;
1026 tx->sdata = sdata; 1103 tx->sdata = sdata;
1027 tx->channel = local->hw.conf.channel; 1104 tx->channel = local->hw.conf.channel;
@@ -1032,7 +1109,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1032 tx->flags |= IEEE80211_TX_FRAGMENTED; 1109 tx->flags |= IEEE80211_TX_FRAGMENTED;
1033 1110
1034 /* process and remove the injection radiotap header */ 1111 /* process and remove the injection radiotap header */
1035 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) { 1112 if (unlikely(info->flags & IEEE80211_TX_INTFL_HAS_RADIOTAP)) {
1036 if (!__ieee80211_parse_tx_radiotap(tx, skb)) 1113 if (!__ieee80211_parse_tx_radiotap(tx, skb))
1037 return TX_DROP; 1114 return TX_DROP;
1038 1115
@@ -1041,6 +1118,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1041 * the radiotap header that was present and pre-filled 1118 * the radiotap header that was present and pre-filled
1042 * 'tx' with tx control information. 1119 * 'tx' with tx control information.
1043 */ 1120 */
1121 info->flags &= ~IEEE80211_TX_INTFL_HAS_RADIOTAP;
1044 } 1122 }
1045 1123
1046 /* 1124 /*
@@ -1052,10 +1130,15 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1052 1130
1053 hdr = (struct ieee80211_hdr *) skb->data; 1131 hdr = (struct ieee80211_hdr *) skb->data;
1054 1132
1055 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1133 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
1056 tx->sta = rcu_dereference(sdata->u.vlan.sta); 1134 tx->sta = rcu_dereference(sdata->u.vlan.sta);
1135 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
1136 return TX_DROP;
1137 } else if (info->flags & IEEE80211_TX_CTL_INJECTED) {
1138 tx->sta = sta_info_get_bss(sdata, hdr->addr1);
1139 }
1057 if (!tx->sta) 1140 if (!tx->sta)
1058 tx->sta = sta_info_get(local, hdr->addr1); 1141 tx->sta = sta_info_get(sdata, hdr->addr1);
1059 1142
1060 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) && 1143 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
1061 (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) { 1144 (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
@@ -1207,6 +1290,7 @@ static int __ieee80211_tx(struct ieee80211_local *local,
1207static int invoke_tx_handlers(struct ieee80211_tx_data *tx) 1290static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1208{ 1291{
1209 struct sk_buff *skb = tx->skb; 1292 struct sk_buff *skb = tx->skb;
1293 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1210 ieee80211_tx_result res = TX_DROP; 1294 ieee80211_tx_result res = TX_DROP;
1211 1295
1212#define CALL_TXH(txh) \ 1296#define CALL_TXH(txh) \
@@ -1216,13 +1300,18 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1216 goto txh_done; \ 1300 goto txh_done; \
1217 } while (0) 1301 } while (0)
1218 1302
1303 CALL_TXH(ieee80211_tx_h_dynamic_ps);
1219 CALL_TXH(ieee80211_tx_h_check_assoc); 1304 CALL_TXH(ieee80211_tx_h_check_assoc);
1220 CALL_TXH(ieee80211_tx_h_ps_buf); 1305 CALL_TXH(ieee80211_tx_h_ps_buf);
1221 CALL_TXH(ieee80211_tx_h_select_key); 1306 CALL_TXH(ieee80211_tx_h_select_key);
1222 CALL_TXH(ieee80211_tx_h_michael_mic_add); 1307 CALL_TXH(ieee80211_tx_h_sta);
1223 if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)) 1308 if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
1224 CALL_TXH(ieee80211_tx_h_rate_ctrl); 1309 CALL_TXH(ieee80211_tx_h_rate_ctrl);
1225 CALL_TXH(ieee80211_tx_h_misc); 1310
1311 if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION))
1312 goto txh_done;
1313
1314 CALL_TXH(ieee80211_tx_h_michael_mic_add);
1226 CALL_TXH(ieee80211_tx_h_sequence); 1315 CALL_TXH(ieee80211_tx_h_sequence);
1227 CALL_TXH(ieee80211_tx_h_fragment); 1316 CALL_TXH(ieee80211_tx_h_fragment);
1228 /* handlers after fragment must be aware of tx info fragmentation! */ 1317 /* handlers after fragment must be aware of tx info fragmentation! */
@@ -1398,30 +1487,6 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
1398 return 0; 1487 return 0;
1399} 1488}
1400 1489
1401static bool need_dynamic_ps(struct ieee80211_local *local)
1402{
1403 /* driver doesn't support power save */
1404 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
1405 return false;
1406
1407 /* hardware does dynamic power save */
1408 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
1409 return false;
1410
1411 /* dynamic power save disabled */
1412 if (local->hw.conf.dynamic_ps_timeout <= 0)
1413 return false;
1414
1415 /* we are scanning, don't enable power save */
1416 if (local->scanning)
1417 return false;
1418
1419 if (!local->ps_sdata)
1420 return false;
1421
1422 return true;
1423}
1424
1425static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, 1490static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1426 struct sk_buff *skb) 1491 struct sk_buff *skb)
1427{ 1492{
@@ -1432,25 +1497,14 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1432 int headroom; 1497 int headroom;
1433 bool may_encrypt; 1498 bool may_encrypt;
1434 1499
1435 if (need_dynamic_ps(local)) {
1436 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
1437 ieee80211_stop_queues_by_reason(&local->hw,
1438 IEEE80211_QUEUE_STOP_REASON_PS);
1439 ieee80211_queue_work(&local->hw,
1440 &local->dynamic_ps_disable_work);
1441 }
1442
1443 mod_timer(&local->dynamic_ps_timer, jiffies +
1444 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
1445 }
1446
1447 rcu_read_lock(); 1500 rcu_read_lock();
1448 1501
1449 if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) { 1502 if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) {
1450 int hdrlen; 1503 int hdrlen;
1451 u16 len_rthdr; 1504 u16 len_rthdr;
1452 1505
1453 info->flags |= IEEE80211_TX_CTL_INJECTED; 1506 info->flags |= IEEE80211_TX_CTL_INJECTED |
1507 IEEE80211_TX_INTFL_HAS_RADIOTAP;
1454 1508
1455 len_rthdr = ieee80211_get_radiotap_len(skb->data); 1509 len_rthdr = ieee80211_get_radiotap_len(skb->data);
1456 hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr); 1510 hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
@@ -1470,11 +1524,11 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1470 1524
1471 list_for_each_entry_rcu(tmp_sdata, &local->interfaces, 1525 list_for_each_entry_rcu(tmp_sdata, &local->interfaces,
1472 list) { 1526 list) {
1473 if (!netif_running(tmp_sdata->dev)) 1527 if (!ieee80211_sdata_running(tmp_sdata))
1474 continue; 1528 continue;
1475 if (tmp_sdata->vif.type != NL80211_IFTYPE_AP) 1529 if (tmp_sdata->vif.type != NL80211_IFTYPE_AP)
1476 continue; 1530 continue;
1477 if (compare_ether_addr(tmp_sdata->dev->dev_addr, 1531 if (compare_ether_addr(tmp_sdata->vif.addr,
1478 hdr->addr2) == 0) { 1532 hdr->addr2) == 0) {
1479 sdata = tmp_sdata; 1533 sdata = tmp_sdata;
1480 break; 1534 break;
@@ -1508,7 +1562,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1508 return; 1562 return;
1509 } 1563 }
1510 1564
1511 ieee80211_select_queue(local, skb); 1565 ieee80211_set_qos_hdr(local, skb);
1512 ieee80211_tx(sdata, skb, false); 1566 ieee80211_tx(sdata, skb, false);
1513 rcu_read_unlock(); 1567 rcu_read_unlock();
1514} 1568}
@@ -1638,7 +1692,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1638 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 1692 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1639 /* RA TA DA SA */ 1693 /* RA TA DA SA */
1640 memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN); 1694 memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN);
1641 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1695 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1642 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1696 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1643 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); 1697 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1644 hdrlen = 30; 1698 hdrlen = 30;
@@ -1652,7 +1706,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1652 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); 1706 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
1653 /* DA BSSID SA */ 1707 /* DA BSSID SA */
1654 memcpy(hdr.addr1, skb->data, ETH_ALEN); 1708 memcpy(hdr.addr1, skb->data, ETH_ALEN);
1655 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1709 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1656 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); 1710 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
1657 hdrlen = 24; 1711 hdrlen = 24;
1658 break; 1712 break;
@@ -1660,7 +1714,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1660 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 1714 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1661 /* RA TA DA SA */ 1715 /* RA TA DA SA */
1662 memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN); 1716 memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN);
1663 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1717 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1664 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1718 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1665 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); 1719 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1666 hdrlen = 30; 1720 hdrlen = 30;
@@ -1674,8 +1728,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1674 goto fail; 1728 goto fail;
1675 } 1729 }
1676 1730
1677 if (compare_ether_addr(dev->dev_addr, 1731 if (compare_ether_addr(sdata->vif.addr,
1678 skb->data + ETH_ALEN) == 0) { 1732 skb->data + ETH_ALEN) == 0) {
1679 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, 1733 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1680 skb->data, skb->data + ETH_ALEN); 1734 skb->data, skb->data + ETH_ALEN);
1681 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, 1735 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
@@ -1705,7 +1759,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1705 } 1759 }
1706 } 1760 }
1707 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, 1761 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1708 mesh_da, dev->dev_addr); 1762 mesh_da, sdata->vif.addr);
1709 rcu_read_unlock(); 1763 rcu_read_unlock();
1710 if (is_mesh_mcast) 1764 if (is_mesh_mcast)
1711 meshhdrlen = 1765 meshhdrlen =
@@ -1730,7 +1784,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1730 if (sdata->u.mgd.use_4addr && ethertype != ETH_P_PAE) { 1784 if (sdata->u.mgd.use_4addr && ethertype != ETH_P_PAE) {
1731 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 1785 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1732 /* RA TA DA SA */ 1786 /* RA TA DA SA */
1733 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1787 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1734 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1788 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1735 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); 1789 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1736 hdrlen = 30; 1790 hdrlen = 30;
@@ -1761,9 +1815,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1761 */ 1815 */
1762 if (!is_multicast_ether_addr(hdr.addr1)) { 1816 if (!is_multicast_ether_addr(hdr.addr1)) {
1763 rcu_read_lock(); 1817 rcu_read_lock();
1764 sta = sta_info_get(local, hdr.addr1); 1818 sta = sta_info_get(sdata, hdr.addr1);
1765 /* XXX: in the future, use sdata to look up the sta */ 1819 if (sta)
1766 if (sta && sta->sdata == sdata)
1767 sta_flags = get_sta_flags(sta); 1820 sta_flags = get_sta_flags(sta);
1768 rcu_read_unlock(); 1821 rcu_read_unlock();
1769 } 1822 }
@@ -1782,7 +1835,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1782 unlikely(!is_multicast_ether_addr(hdr.addr1) && 1835 unlikely(!is_multicast_ether_addr(hdr.addr1) &&
1783 !(sta_flags & WLAN_STA_AUTHORIZED) && 1836 !(sta_flags & WLAN_STA_AUTHORIZED) &&
1784 !(ethertype == ETH_P_PAE && 1837 !(ethertype == ETH_P_PAE &&
1785 compare_ether_addr(dev->dev_addr, 1838 compare_ether_addr(sdata->vif.addr,
1786 skb->data + ETH_ALEN) == 0))) { 1839 skb->data + ETH_ALEN) == 0))) {
1787#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1840#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1788 if (net_ratelimit()) 1841 if (net_ratelimit())
@@ -1922,7 +1975,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
1922 ieee80211_tx(sdata, skb, true); 1975 ieee80211_tx(sdata, skb, true);
1923 } else { 1976 } else {
1924 hdr = (struct ieee80211_hdr *)skb->data; 1977 hdr = (struct ieee80211_hdr *)skb->data;
1925 sta = sta_info_get(local, hdr->addr1); 1978 sta = sta_info_get(sdata, hdr->addr1);
1926 1979
1927 ret = __ieee80211_tx(local, &skb, sta, true); 1980 ret = __ieee80211_tx(local, &skb, sta, true);
1928 if (ret != IEEE80211_TX_OK) 1981 if (ret != IEEE80211_TX_OK)
@@ -2058,6 +2111,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2058 struct beacon_data *beacon; 2111 struct beacon_data *beacon;
2059 struct ieee80211_supported_band *sband; 2112 struct ieee80211_supported_band *sband;
2060 enum ieee80211_band band = local->hw.conf.channel->band; 2113 enum ieee80211_band band = local->hw.conf.channel->band;
2114 struct ieee80211_tx_rate_control txrc;
2061 2115
2062 sband = local->hw.wiphy->bands[band]; 2116 sband = local->hw.wiphy->bands[band];
2063 2117
@@ -2146,8 +2200,8 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2146 mgmt->frame_control = 2200 mgmt->frame_control =
2147 cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); 2201 cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
2148 memset(mgmt->da, 0xff, ETH_ALEN); 2202 memset(mgmt->da, 0xff, ETH_ALEN);
2149 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 2203 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
2150 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); 2204 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
2151 mgmt->u.beacon.beacon_int = 2205 mgmt->u.beacon.beacon_int =
2152 cpu_to_le16(sdata->vif.bss_conf.beacon_int); 2206 cpu_to_le16(sdata->vif.bss_conf.beacon_int);
2153 mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */ 2207 mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */
@@ -2165,21 +2219,25 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2165 info = IEEE80211_SKB_CB(skb); 2219 info = IEEE80211_SKB_CB(skb);
2166 2220
2167 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 2221 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
2222 info->flags |= IEEE80211_TX_CTL_NO_ACK;
2168 info->band = band; 2223 info->band = band;
2169 /* 2224
2170 * XXX: For now, always use the lowest rate 2225 memset(&txrc, 0, sizeof(txrc));
2171 */ 2226 txrc.hw = hw;
2172 info->control.rates[0].idx = 0; 2227 txrc.sband = sband;
2173 info->control.rates[0].count = 1; 2228 txrc.bss_conf = &sdata->vif.bss_conf;
2174 info->control.rates[1].idx = -1; 2229 txrc.skb = skb;
2175 info->control.rates[2].idx = -1; 2230 txrc.reported_rate.idx = -1;
2176 info->control.rates[3].idx = -1; 2231 txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
2177 info->control.rates[4].idx = -1; 2232 if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
2178 BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5); 2233 txrc.max_rate_idx = -1;
2234 else
2235 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
2236 txrc.ap = true;
2237 rate_control_get_rate(sdata, NULL, &txrc);
2179 2238
2180 info->control.vif = vif; 2239 info->control.vif = vif;
2181 2240
2182 info->flags |= IEEE80211_TX_CTL_NO_ACK;
2183 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 2241 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
2184 info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; 2242 info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
2185 out: 2243 out:
@@ -2188,6 +2246,134 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2188} 2246}
2189EXPORT_SYMBOL(ieee80211_beacon_get_tim); 2247EXPORT_SYMBOL(ieee80211_beacon_get_tim);
2190 2248
2249struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
2250 struct ieee80211_vif *vif)
2251{
2252 struct ieee80211_sub_if_data *sdata;
2253 struct ieee80211_if_managed *ifmgd;
2254 struct ieee80211_pspoll *pspoll;
2255 struct ieee80211_local *local;
2256 struct sk_buff *skb;
2257
2258 if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
2259 return NULL;
2260
2261 sdata = vif_to_sdata(vif);
2262 ifmgd = &sdata->u.mgd;
2263 local = sdata->local;
2264
2265 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
2266 if (!skb) {
2267 printk(KERN_DEBUG "%s: failed to allocate buffer for "
2268 "pspoll template\n", sdata->name);
2269 return NULL;
2270 }
2271 skb_reserve(skb, local->hw.extra_tx_headroom);
2272
2273 pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll));
2274 memset(pspoll, 0, sizeof(*pspoll));
2275 pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
2276 IEEE80211_STYPE_PSPOLL);
2277 pspoll->aid = cpu_to_le16(ifmgd->aid);
2278
2279 /* aid in PS-Poll has its two MSBs each set to 1 */
2280 pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);
2281
2282 memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
2283 memcpy(pspoll->ta, vif->addr, ETH_ALEN);
2284
2285 return skb;
2286}
2287EXPORT_SYMBOL(ieee80211_pspoll_get);
2288
2289struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
2290 struct ieee80211_vif *vif)
2291{
2292 struct ieee80211_hdr_3addr *nullfunc;
2293 struct ieee80211_sub_if_data *sdata;
2294 struct ieee80211_if_managed *ifmgd;
2295 struct ieee80211_local *local;
2296 struct sk_buff *skb;
2297
2298 if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
2299 return NULL;
2300
2301 sdata = vif_to_sdata(vif);
2302 ifmgd = &sdata->u.mgd;
2303 local = sdata->local;
2304
2305 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
2306 if (!skb) {
2307 printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
2308 "template\n", sdata->name);
2309 return NULL;
2310 }
2311 skb_reserve(skb, local->hw.extra_tx_headroom);
2312
2313 nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb,
2314 sizeof(*nullfunc));
2315 memset(nullfunc, 0, sizeof(*nullfunc));
2316 nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
2317 IEEE80211_STYPE_NULLFUNC |
2318 IEEE80211_FCTL_TODS);
2319 memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
2320 memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
2321 memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
2322
2323 return skb;
2324}
2325EXPORT_SYMBOL(ieee80211_nullfunc_get);
2326
2327struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
2328 struct ieee80211_vif *vif,
2329 const u8 *ssid, size_t ssid_len,
2330 const u8 *ie, size_t ie_len)
2331{
2332 struct ieee80211_sub_if_data *sdata;
2333 struct ieee80211_local *local;
2334 struct ieee80211_hdr_3addr *hdr;
2335 struct sk_buff *skb;
2336 size_t ie_ssid_len;
2337 u8 *pos;
2338
2339 sdata = vif_to_sdata(vif);
2340 local = sdata->local;
2341 ie_ssid_len = 2 + ssid_len;
2342
2343 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
2344 ie_ssid_len + ie_len);
2345 if (!skb) {
2346 printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
2347 "request template\n", sdata->name);
2348 return NULL;
2349 }
2350
2351 skb_reserve(skb, local->hw.extra_tx_headroom);
2352
2353 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
2354 memset(hdr, 0, sizeof(*hdr));
2355 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2356 IEEE80211_STYPE_PROBE_REQ);
2357 memset(hdr->addr1, 0xff, ETH_ALEN);
2358 memcpy(hdr->addr2, vif->addr, ETH_ALEN);
2359 memset(hdr->addr3, 0xff, ETH_ALEN);
2360
2361 pos = skb_put(skb, ie_ssid_len);
2362 *pos++ = WLAN_EID_SSID;
2363 *pos++ = ssid_len;
2364 if (ssid)
2365 memcpy(pos, ssid, ssid_len);
2366 pos += ssid_len;
2367
2368 if (ie) {
2369 pos = skb_put(skb, ie_len);
2370 memcpy(pos, ie, ie_len);
2371 }
2372
2373 return skb;
2374}
2375EXPORT_SYMBOL(ieee80211_probereq_get);
2376
2191void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 2377void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2192 const void *frame, size_t frame_len, 2378 const void *frame, size_t frame_len,
2193 const struct ieee80211_tx_info *frame_txctl, 2379 const struct ieee80211_tx_info *frame_txctl,
@@ -2287,6 +2473,9 @@ void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
2287 skb_set_network_header(skb, 0); 2473 skb_set_network_header(skb, 0);
2288 skb_set_transport_header(skb, 0); 2474 skb_set_transport_header(skb, 0);
2289 2475
2476 /* send all internal mgmt frames on VO */
2477 skb_set_queue_mapping(skb, 0);
2478
2290 /* 2479 /*
2291 * The other path calling ieee80211_xmit is from the tasklet, 2480 * The other path calling ieee80211_xmit is from the tasklet,
2292 * and while we can handle concurrent transmissions locking 2481 * and while we can handle concurrent transmissions locking
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 78a6e924c7e1..c453226f06b2 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -18,7 +18,6 @@
18#include <linux/skbuff.h> 18#include <linux/skbuff.h>
19#include <linux/etherdevice.h> 19#include <linux/etherdevice.h>
20#include <linux/if_arp.h> 20#include <linux/if_arp.h>
21#include <linux/wireless.h>
22#include <linux/bitmap.h> 21#include <linux/bitmap.h>
23#include <linux/crc32.h> 22#include <linux/crc32.h>
24#include <net/net_namespace.h> 23#include <net/net_namespace.h>
@@ -269,6 +268,7 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
269 enum queue_stop_reason reason) 268 enum queue_stop_reason reason)
270{ 269{
271 struct ieee80211_local *local = hw_to_local(hw); 270 struct ieee80211_local *local = hw_to_local(hw);
271 struct ieee80211_sub_if_data *sdata;
272 272
273 if (WARN_ON(queue >= hw->queues)) 273 if (WARN_ON(queue >= hw->queues))
274 return; 274 return;
@@ -281,6 +281,11 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
281 281
282 if (!skb_queue_empty(&local->pending[queue])) 282 if (!skb_queue_empty(&local->pending[queue]))
283 tasklet_schedule(&local->tx_pending_tasklet); 283 tasklet_schedule(&local->tx_pending_tasklet);
284
285 rcu_read_lock();
286 list_for_each_entry_rcu(sdata, &local->interfaces, list)
287 netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
288 rcu_read_unlock();
284} 289}
285 290
286void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, 291void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -305,11 +310,17 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
305 enum queue_stop_reason reason) 310 enum queue_stop_reason reason)
306{ 311{
307 struct ieee80211_local *local = hw_to_local(hw); 312 struct ieee80211_local *local = hw_to_local(hw);
313 struct ieee80211_sub_if_data *sdata;
308 314
309 if (WARN_ON(queue >= hw->queues)) 315 if (WARN_ON(queue >= hw->queues))
310 return; 316 return;
311 317
312 __set_bit(reason, &local->queue_stop_reasons[queue]); 318 __set_bit(reason, &local->queue_stop_reasons[queue]);
319
320 rcu_read_lock();
321 list_for_each_entry_rcu(sdata, &local->interfaces, list)
322 netif_tx_stop_queue(netdev_get_tx_queue(sdata->dev, queue));
323 rcu_read_unlock();
313} 324}
314 325
315void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, 326void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -468,8 +479,8 @@ void ieee80211_iterate_active_interfaces(
468 case NL80211_IFTYPE_MESH_POINT: 479 case NL80211_IFTYPE_MESH_POINT:
469 break; 480 break;
470 } 481 }
471 if (netif_running(sdata->dev)) 482 if (ieee80211_sdata_running(sdata))
472 iterator(data, sdata->dev->dev_addr, 483 iterator(data, sdata->vif.addr,
473 &sdata->vif); 484 &sdata->vif);
474 } 485 }
475 486
@@ -502,8 +513,8 @@ void ieee80211_iterate_active_interfaces_atomic(
502 case NL80211_IFTYPE_MESH_POINT: 513 case NL80211_IFTYPE_MESH_POINT:
503 break; 514 break;
504 } 515 }
505 if (netif_running(sdata->dev)) 516 if (ieee80211_sdata_running(sdata))
506 iterator(data, sdata->dev->dev_addr, 517 iterator(data, sdata->vif.addr,
507 &sdata->vif); 518 &sdata->vif);
508 } 519 }
509 520
@@ -781,6 +792,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
781 break; 792 break;
782 } 793 }
783 794
795 qparam.uapsd = false;
796
784 drv_conf_tx(local, queue, &qparam); 797 drv_conf_tx(local, queue, &qparam);
785 } 798 }
786} 799}
@@ -848,7 +861,7 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
848 sizeof(*mgmt) + 6 + extra_len); 861 sizeof(*mgmt) + 6 + extra_len);
849 if (!skb) { 862 if (!skb) {
850 printk(KERN_DEBUG "%s: failed to allocate buffer for auth " 863 printk(KERN_DEBUG "%s: failed to allocate buffer for auth "
851 "frame\n", sdata->dev->name); 864 "frame\n", sdata->name);
852 return; 865 return;
853 } 866 }
854 skb_reserve(skb, local->hw.extra_tx_headroom); 867 skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -858,7 +871,7 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
858 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 871 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
859 IEEE80211_STYPE_AUTH); 872 IEEE80211_STYPE_AUTH);
860 memcpy(mgmt->da, bssid, ETH_ALEN); 873 memcpy(mgmt->da, bssid, ETH_ALEN);
861 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 874 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
862 memcpy(mgmt->bssid, bssid, ETH_ALEN); 875 memcpy(mgmt->bssid, bssid, ETH_ALEN);
863 mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg); 876 mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg);
864 mgmt->u.auth.auth_transaction = cpu_to_le16(transaction); 877 mgmt->u.auth.auth_transaction = cpu_to_le16(transaction);
@@ -881,43 +894,87 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
881 enum ieee80211_band band) 894 enum ieee80211_band band)
882{ 895{
883 struct ieee80211_supported_band *sband; 896 struct ieee80211_supported_band *sband;
884 u8 *pos, *supp_rates_len, *esupp_rates_len = NULL; 897 u8 *pos;
885 int i; 898 size_t offset = 0, noffset;
899 int supp_rates_len, i;
886 900
887 sband = local->hw.wiphy->bands[band]; 901 sband = local->hw.wiphy->bands[band];
888 902
889 pos = buffer; 903 pos = buffer;
890 904
905 supp_rates_len = min_t(int, sband->n_bitrates, 8);
906
891 *pos++ = WLAN_EID_SUPP_RATES; 907 *pos++ = WLAN_EID_SUPP_RATES;
892 supp_rates_len = pos; 908 *pos++ = supp_rates_len;
893 *pos++ = 0;
894
895 for (i = 0; i < sband->n_bitrates; i++) {
896 struct ieee80211_rate *rate = &sband->bitrates[i];
897
898 if (esupp_rates_len) {
899 *esupp_rates_len += 1;
900 } else if (*supp_rates_len == 8) {
901 *pos++ = WLAN_EID_EXT_SUPP_RATES;
902 esupp_rates_len = pos;
903 *pos++ = 1;
904 } else
905 *supp_rates_len += 1;
906 909
907 *pos++ = rate->bitrate / 5; 910 for (i = 0; i < supp_rates_len; i++) {
911 int rate = sband->bitrates[i].bitrate;
912 *pos++ = (u8) (rate / 5);
913 }
914
915 /* insert "request information" if in custom IEs */
916 if (ie && ie_len) {
917 static const u8 before_extrates[] = {
918 WLAN_EID_SSID,
919 WLAN_EID_SUPP_RATES,
920 WLAN_EID_REQUEST,
921 };
922 noffset = ieee80211_ie_split(ie, ie_len,
923 before_extrates,
924 ARRAY_SIZE(before_extrates),
925 offset);
926 memcpy(pos, ie + offset, noffset - offset);
927 pos += noffset - offset;
928 offset = noffset;
929 }
930
931 if (sband->n_bitrates > i) {
932 *pos++ = WLAN_EID_EXT_SUPP_RATES;
933 *pos++ = sband->n_bitrates - i;
934
935 for (; i < sband->n_bitrates; i++) {
936 int rate = sband->bitrates[i].bitrate;
937 *pos++ = (u8) (rate / 5);
938 }
939 }
940
941 /* insert custom IEs that go before HT */
942 if (ie && ie_len) {
943 static const u8 before_ht[] = {
944 WLAN_EID_SSID,
945 WLAN_EID_SUPP_RATES,
946 WLAN_EID_REQUEST,
947 WLAN_EID_EXT_SUPP_RATES,
948 WLAN_EID_DS_PARAMS,
949 WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
950 };
951 noffset = ieee80211_ie_split(ie, ie_len,
952 before_ht, ARRAY_SIZE(before_ht),
953 offset);
954 memcpy(pos, ie + offset, noffset - offset);
955 pos += noffset - offset;
956 offset = noffset;
908 } 957 }
909 958
910 if (sband->ht_cap.ht_supported) { 959 if (sband->ht_cap.ht_supported) {
911 __le16 tmp = cpu_to_le16(sband->ht_cap.cap); 960 u16 cap = sband->ht_cap.cap;
961 __le16 tmp;
962
963 if (ieee80211_disable_40mhz_24ghz &&
964 sband->band == IEEE80211_BAND_2GHZ) {
965 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
966 cap &= ~IEEE80211_HT_CAP_SGI_40;
967 }
912 968
913 *pos++ = WLAN_EID_HT_CAPABILITY; 969 *pos++ = WLAN_EID_HT_CAPABILITY;
914 *pos++ = sizeof(struct ieee80211_ht_cap); 970 *pos++ = sizeof(struct ieee80211_ht_cap);
915 memset(pos, 0, sizeof(struct ieee80211_ht_cap)); 971 memset(pos, 0, sizeof(struct ieee80211_ht_cap));
972 tmp = cpu_to_le16(cap);
916 memcpy(pos, &tmp, sizeof(u16)); 973 memcpy(pos, &tmp, sizeof(u16));
917 pos += sizeof(u16); 974 pos += sizeof(u16);
918 /* TODO: needs a define here for << 2 */
919 *pos++ = sband->ht_cap.ampdu_factor | 975 *pos++ = sband->ht_cap.ampdu_factor |
920 (sband->ht_cap.ampdu_density << 2); 976 (sband->ht_cap.ampdu_density <<
977 IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
921 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs)); 978 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
922 pos += sizeof(sband->ht_cap.mcs); 979 pos += sizeof(sband->ht_cap.mcs);
923 pos += 2 + 4 + 1; /* ext info, BF cap, antsel */ 980 pos += 2 + 4 + 1; /* ext info, BF cap, antsel */
@@ -928,9 +985,11 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
928 * that calculates local->scan_ies_len. 985 * that calculates local->scan_ies_len.
929 */ 986 */
930 987
931 if (ie) { 988 /* add any remaining custom IEs */
932 memcpy(pos, ie, ie_len); 989 if (ie && ie_len) {
933 pos += ie_len; 990 noffset = ie_len;
991 memcpy(pos, ie + offset, noffset - offset);
992 pos += noffset - offset;
934 } 993 }
935 994
936 return pos - buffer; 995 return pos - buffer;
@@ -943,40 +1002,33 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
943 struct ieee80211_local *local = sdata->local; 1002 struct ieee80211_local *local = sdata->local;
944 struct sk_buff *skb; 1003 struct sk_buff *skb;
945 struct ieee80211_mgmt *mgmt; 1004 struct ieee80211_mgmt *mgmt;
946 u8 *pos; 1005 size_t buf_len;
947 1006 u8 *buf;
948 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200 + 1007
949 ie_len); 1008 /* FIXME: come up with a proper value */
950 if (!skb) { 1009 buf = kmalloc(200 + ie_len, GFP_KERNEL);
951 printk(KERN_DEBUG "%s: failed to allocate buffer for probe " 1010 if (!buf) {
952 "request\n", sdata->dev->name); 1011 printk(KERN_DEBUG "%s: failed to allocate temporary IE "
1012 "buffer\n", sdata->name);
953 return; 1013 return;
954 } 1014 }
955 skb_reserve(skb, local->hw.extra_tx_headroom);
956 1015
957 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 1016 buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len,
958 memset(mgmt, 0, 24); 1017 local->hw.conf.channel->band);
959 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 1018
960 IEEE80211_STYPE_PROBE_REQ); 1019 skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
961 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 1020 ssid, ssid_len,
1021 buf, buf_len);
1022
962 if (dst) { 1023 if (dst) {
1024 mgmt = (struct ieee80211_mgmt *) skb->data;
963 memcpy(mgmt->da, dst, ETH_ALEN); 1025 memcpy(mgmt->da, dst, ETH_ALEN);
964 memcpy(mgmt->bssid, dst, ETH_ALEN); 1026 memcpy(mgmt->bssid, dst, ETH_ALEN);
965 } else {
966 memset(mgmt->da, 0xff, ETH_ALEN);
967 memset(mgmt->bssid, 0xff, ETH_ALEN);
968 } 1027 }
969 pos = skb_put(skb, 2 + ssid_len);
970 *pos++ = WLAN_EID_SSID;
971 *pos++ = ssid_len;
972 memcpy(pos, ssid, ssid_len);
973 pos += ssid_len;
974
975 skb_put(skb, ieee80211_build_preq_ies(local, pos, ie, ie_len,
976 local->hw.conf.channel->band));
977 1028
978 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 1029 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
979 ieee80211_tx_skb(sdata, skb); 1030 ieee80211_tx_skb(sdata, skb);
1031 kfree(buf);
980} 1032}
981 1033
982u32 ieee80211_sta_get_rates(struct ieee80211_local *local, 1034u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
@@ -1020,18 +1072,16 @@ void ieee80211_stop_device(struct ieee80211_local *local)
1020 ieee80211_led_radio(local, false); 1072 ieee80211_led_radio(local, false);
1021 1073
1022 cancel_work_sync(&local->reconfig_filter); 1074 cancel_work_sync(&local->reconfig_filter);
1023 drv_stop(local);
1024 1075
1025 flush_workqueue(local->workqueue); 1076 flush_workqueue(local->workqueue);
1077 drv_stop(local);
1026} 1078}
1027 1079
1028int ieee80211_reconfig(struct ieee80211_local *local) 1080int ieee80211_reconfig(struct ieee80211_local *local)
1029{ 1081{
1030 struct ieee80211_hw *hw = &local->hw; 1082 struct ieee80211_hw *hw = &local->hw;
1031 struct ieee80211_sub_if_data *sdata; 1083 struct ieee80211_sub_if_data *sdata;
1032 struct ieee80211_if_init_conf conf;
1033 struct sta_info *sta; 1084 struct sta_info *sta;
1034 unsigned long flags;
1035 int res; 1085 int res;
1036 1086
1037 if (local->suspended) 1087 if (local->suspended)
@@ -1039,7 +1089,19 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1039 1089
1040 /* restart hardware */ 1090 /* restart hardware */
1041 if (local->open_count) { 1091 if (local->open_count) {
1092 /*
1093 * Upon resume hardware can sometimes be goofy due to
1094 * various platform / driver / bus issues, so restarting
1095 * the device may at times not work immediately. Propagate
1096 * the error.
1097 */
1042 res = drv_start(local); 1098 res = drv_start(local);
1099 if (res) {
1100 WARN(local->suspended, "Harware became unavailable "
1101 "upon resume. This is could be a software issue"
1102 "prior to suspend or a hardware issue\n");
1103 return res;
1104 }
1043 1105
1044 ieee80211_led_radio(local, true); 1106 ieee80211_led_radio(local, true);
1045 } 1107 }
@@ -1048,29 +1110,24 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1048 list_for_each_entry(sdata, &local->interfaces, list) { 1110 list_for_each_entry(sdata, &local->interfaces, list) {
1049 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 1111 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
1050 sdata->vif.type != NL80211_IFTYPE_MONITOR && 1112 sdata->vif.type != NL80211_IFTYPE_MONITOR &&
1051 netif_running(sdata->dev)) { 1113 ieee80211_sdata_running(sdata))
1052 conf.vif = &sdata->vif; 1114 res = drv_add_interface(local, &sdata->vif);
1053 conf.type = sdata->vif.type;
1054 conf.mac_addr = sdata->dev->dev_addr;
1055 res = drv_add_interface(local, &conf);
1056 }
1057 } 1115 }
1058 1116
1059 /* add STAs back */ 1117 /* add STAs back */
1060 if (local->ops->sta_notify) { 1118 mutex_lock(&local->sta_mtx);
1061 spin_lock_irqsave(&local->sta_lock, flags); 1119 list_for_each_entry(sta, &local->sta_list, list) {
1062 list_for_each_entry(sta, &local->sta_list, list) { 1120 if (sta->uploaded) {
1063 sdata = sta->sdata; 1121 sdata = sta->sdata;
1064 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1122 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1065 sdata = container_of(sdata->bss, 1123 sdata = container_of(sdata->bss,
1066 struct ieee80211_sub_if_data, 1124 struct ieee80211_sub_if_data,
1067 u.ap); 1125 u.ap);
1068 1126
1069 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_ADD, 1127 WARN_ON(drv_sta_add(local, sdata, &sta->sta));
1070 &sta->sta);
1071 } 1128 }
1072 spin_unlock_irqrestore(&local->sta_lock, flags);
1073 } 1129 }
1130 mutex_unlock(&local->sta_mtx);
1074 1131
1075 /* Clear Suspend state so that ADDBA requests can be processed */ 1132 /* Clear Suspend state so that ADDBA requests can be processed */
1076 1133
@@ -1095,7 +1152,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1095 /* Finally also reconfigure all the BSS information */ 1152 /* Finally also reconfigure all the BSS information */
1096 list_for_each_entry(sdata, &local->interfaces, list) { 1153 list_for_each_entry(sdata, &local->interfaces, list) {
1097 u32 changed = ~0; 1154 u32 changed = ~0;
1098 if (!netif_running(sdata->dev)) 1155 if (!ieee80211_sdata_running(sdata))
1099 continue; 1156 continue;
1100 switch (sdata->vif.type) { 1157 switch (sdata->vif.type) {
1101 case NL80211_IFTYPE_STATION: 1158 case NL80211_IFTYPE_STATION:
@@ -1121,9 +1178,17 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1121 } 1178 }
1122 } 1179 }
1123 1180
1181 rcu_read_lock();
1182 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
1183 list_for_each_entry_rcu(sta, &local->sta_list, list) {
1184 ieee80211_sta_tear_down_BA_sessions(sta);
1185 }
1186 }
1187 rcu_read_unlock();
1188
1124 /* add back keys */ 1189 /* add back keys */
1125 list_for_each_entry(sdata, &local->interfaces, list) 1190 list_for_each_entry(sdata, &local->interfaces, list)
1126 if (netif_running(sdata->dev)) 1191 if (ieee80211_sdata_running(sdata))
1127 ieee80211_enable_keys(sdata); 1192 ieee80211_enable_keys(sdata);
1128 1193
1129 ieee80211_wake_queues_by_reason(hw, 1194 ieee80211_wake_queues_by_reason(hw,
@@ -1160,13 +1225,143 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1160 1225
1161 add_timer(&local->sta_cleanup); 1226 add_timer(&local->sta_cleanup);
1162 1227
1163 spin_lock_irqsave(&local->sta_lock, flags); 1228 mutex_lock(&local->sta_mtx);
1164 list_for_each_entry(sta, &local->sta_list, list) 1229 list_for_each_entry(sta, &local->sta_list, list)
1165 mesh_plink_restart(sta); 1230 mesh_plink_restart(sta);
1166 spin_unlock_irqrestore(&local->sta_lock, flags); 1231 mutex_unlock(&local->sta_mtx);
1167#else 1232#else
1168 WARN_ON(1); 1233 WARN_ON(1);
1169#endif 1234#endif
1170 return 0; 1235 return 0;
1171} 1236}
1172 1237
1238static int check_mgd_smps(struct ieee80211_if_managed *ifmgd,
1239 enum ieee80211_smps_mode *smps_mode)
1240{
1241 if (ifmgd->associated) {
1242 *smps_mode = ifmgd->ap_smps;
1243
1244 if (*smps_mode == IEEE80211_SMPS_AUTOMATIC) {
1245 if (ifmgd->powersave)
1246 *smps_mode = IEEE80211_SMPS_DYNAMIC;
1247 else
1248 *smps_mode = IEEE80211_SMPS_OFF;
1249 }
1250
1251 return 1;
1252 }
1253
1254 return 0;
1255}
1256
1257/* must hold iflist_mtx */
1258void ieee80211_recalc_smps(struct ieee80211_local *local,
1259 struct ieee80211_sub_if_data *forsdata)
1260{
1261 struct ieee80211_sub_if_data *sdata;
1262 enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_OFF;
1263 int count = 0;
1264
1265 if (forsdata)
1266 WARN_ON(!mutex_is_locked(&forsdata->u.mgd.mtx));
1267
1268 WARN_ON(!mutex_is_locked(&local->iflist_mtx));
1269
1270 /*
1271 * This function could be improved to handle multiple
1272 * interfaces better, but right now it makes any
1273 * non-station interfaces force SM PS to be turned
1274 * off. If there are multiple station interfaces it
1275 * could also use the best possible mode, e.g. if
1276 * one is in static and the other in dynamic then
1277 * dynamic is ok.
1278 */
1279
1280 list_for_each_entry(sdata, &local->interfaces, list) {
1281 if (!netif_running(sdata->dev))
1282 continue;
1283 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1284 goto set;
1285 if (sdata != forsdata) {
1286 /*
1287 * This nested is ok -- we are holding the iflist_mtx
1288 * so can't get here twice or so. But it's required
1289 * since normally we acquire it first and then the
1290 * iflist_mtx.
1291 */
1292 mutex_lock_nested(&sdata->u.mgd.mtx, SINGLE_DEPTH_NESTING);
1293 count += check_mgd_smps(&sdata->u.mgd, &smps_mode);
1294 mutex_unlock(&sdata->u.mgd.mtx);
1295 } else
1296 count += check_mgd_smps(&sdata->u.mgd, &smps_mode);
1297
1298 if (count > 1) {
1299 smps_mode = IEEE80211_SMPS_OFF;
1300 break;
1301 }
1302 }
1303
1304 if (smps_mode == local->smps_mode)
1305 return;
1306
1307 set:
1308 local->smps_mode = smps_mode;
1309 /* changed flag is auto-detected for this */
1310 ieee80211_hw_config(local, 0);
1311}
1312
1313static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id)
1314{
1315 int i;
1316
1317 for (i = 0; i < n_ids; i++)
1318 if (ids[i] == id)
1319 return true;
1320 return false;
1321}
1322
1323/**
1324 * ieee80211_ie_split - split an IE buffer according to ordering
1325 *
1326 * @ies: the IE buffer
1327 * @ielen: the length of the IE buffer
1328 * @ids: an array with element IDs that are allowed before
1329 * the split
1330 * @n_ids: the size of the element ID array
1331 * @offset: offset where to start splitting in the buffer
1332 *
1333 * This function splits an IE buffer by updating the @offset
1334 * variable to point to the location where the buffer should be
1335 * split.
1336 *
1337 * It assumes that the given IE buffer is well-formed, this
1338 * has to be guaranteed by the caller!
1339 *
1340 * It also assumes that the IEs in the buffer are ordered
1341 * correctly, if not the result of using this function will not
1342 * be ordered correctly either, i.e. it does no reordering.
1343 *
1344 * The function returns the offset where the next part of the
1345 * buffer starts, which may be @ielen if the entire (remainder)
1346 * of the buffer should be used.
1347 */
1348size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
1349 const u8 *ids, int n_ids, size_t offset)
1350{
1351 size_t pos = offset;
1352
1353 while (pos < ielen && ieee80211_id_in_list(ids, n_ids, ies[pos]))
1354 pos += 2 + ies[pos + 1];
1355
1356 return pos;
1357}
1358
1359size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset)
1360{
1361 size_t pos = offset;
1362
1363 while (pos < ielen && ies[pos] != WLAN_EID_VENDOR_SPECIFIC)
1364 pos += 2 + ies[pos + 1];
1365
1366 return pos;
1367}
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 247123fe1a7a..5d745f2d7236 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -305,20 +305,19 @@ static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
305{ 305{
306 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 306 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
307 307
308 if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) { 308 if (!info->control.hw_key) {
309 if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key, 309 if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key,
310 tx->key->conf.keylen, 310 tx->key->conf.keylen,
311 tx->key->conf.keyidx)) 311 tx->key->conf.keyidx))
312 return -1; 312 return -1;
313 } else { 313 } else if (info->control.hw_key->flags &
314 info->control.hw_key = &tx->key->conf; 314 IEEE80211_KEY_FLAG_GENERATE_IV) {
315 if (tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) { 315 if (!ieee80211_wep_add_iv(tx->local, skb,
316 if (!ieee80211_wep_add_iv(tx->local, skb, 316 tx->key->conf.keylen,
317 tx->key->conf.keylen, 317 tx->key->conf.keyidx))
318 tx->key->conf.keyidx)) 318 return -1;
319 return -1;
320 }
321 } 319 }
320
322 return 0; 321 return 0;
323} 322}
324 323
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index b19b7696f3a2..34e6d02da779 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -44,22 +44,69 @@ static int wme_downgrade_ac(struct sk_buff *skb)
44} 44}
45 45
46 46
47/* Indicate which queue to use. */ 47/* Indicate which queue to use. */
48static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb) 48u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
49 struct sk_buff *skb)
49{ 50{
50 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 51 struct ieee80211_local *local = sdata->local;
52 struct sta_info *sta = NULL;
53 u32 sta_flags = 0;
54 const u8 *ra = NULL;
55 bool qos = false;
51 56
52 if (!ieee80211_is_data(hdr->frame_control)) { 57 if (local->hw.queues < 4 || skb->len < 6) {
53 /* management frames go on AC_VO queue, but are sent 58 skb->priority = 0; /* required for correct WPA/11i MIC */
54 * without QoS control fields */ 59 return min_t(u16, local->hw.queues - 1,
55 return 0; 60 ieee802_1d_to_ac[skb->priority]);
61 }
62
63 rcu_read_lock();
64 switch (sdata->vif.type) {
65 case NL80211_IFTYPE_AP_VLAN:
66 rcu_read_lock();
67 sta = rcu_dereference(sdata->u.vlan.sta);
68 if (sta)
69 sta_flags = get_sta_flags(sta);
70 rcu_read_unlock();
71 if (sta)
72 break;
73 case NL80211_IFTYPE_AP:
74 ra = skb->data;
75 break;
76 case NL80211_IFTYPE_WDS:
77 ra = sdata->u.wds.remote_addr;
78 break;
79#ifdef CONFIG_MAC80211_MESH
80 case NL80211_IFTYPE_MESH_POINT:
81 /*
82 * XXX: This is clearly broken ... but already was before,
83 * because ieee80211_fill_mesh_addresses() would clear A1
84 * except for multicast addresses.
85 */
86 break;
87#endif
88 case NL80211_IFTYPE_STATION:
89 ra = sdata->u.mgd.bssid;
90 break;
91 case NL80211_IFTYPE_ADHOC:
92 ra = skb->data;
93 break;
94 default:
95 break;
56 } 96 }
57 97
58 if (0 /* injected */) { 98 if (!sta && ra && !is_multicast_ether_addr(ra)) {
59 /* use AC from radiotap */ 99 sta = sta_info_get(sdata, ra);
100 if (sta)
101 sta_flags = get_sta_flags(sta);
60 } 102 }
61 103
62 if (!ieee80211_is_data_qos(hdr->frame_control)) { 104 if (sta_flags & WLAN_STA_WME)
105 qos = true;
106
107 rcu_read_unlock();
108
109 if (!qos) {
63 skb->priority = 0; /* required for correct WPA/11i MIC */ 110 skb->priority = 0; /* required for correct WPA/11i MIC */
64 return ieee802_1d_to_ac[skb->priority]; 111 return ieee802_1d_to_ac[skb->priority];
65 } 112 }
@@ -68,6 +115,12 @@ static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
68 * data frame has */ 115 * data frame has */
69 skb->priority = cfg80211_classify8021d(skb); 116 skb->priority = cfg80211_classify8021d(skb);
70 117
118 return ieee80211_downgrade_queue(local, skb);
119}
120
121u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
122 struct sk_buff *skb)
123{
71 /* in case we are a client verify acm is not set for this ac */ 124 /* in case we are a client verify acm is not set for this ac */
72 while (unlikely(local->wmm_acm & BIT(skb->priority))) { 125 while (unlikely(local->wmm_acm & BIT(skb->priority))) {
73 if (wme_downgrade_ac(skb)) { 126 if (wme_downgrade_ac(skb)) {
@@ -85,24 +138,17 @@ static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
85 return ieee802_1d_to_ac[skb->priority]; 138 return ieee802_1d_to_ac[skb->priority];
86} 139}
87 140
88void ieee80211_select_queue(struct ieee80211_local *local, struct sk_buff *skb) 141void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb)
89{ 142{
90 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 143 struct ieee80211_hdr *hdr = (void *)skb->data;
91 u16 queue; 144
92 u8 tid; 145 /* Fill in the QoS header if there is one. */
93
94 queue = classify80211(local, skb);
95 if (unlikely(queue >= local->hw.queues))
96 queue = local->hw.queues - 1;
97
98 /*
99 * Now we know the 1d priority, fill in the QoS header if
100 * there is one (and we haven't done this before).
101 */
102 if (ieee80211_is_data_qos(hdr->frame_control)) { 146 if (ieee80211_is_data_qos(hdr->frame_control)) {
103 u8 *p = ieee80211_get_qos_ctl(hdr); 147 u8 *p = ieee80211_get_qos_ctl(hdr);
104 u8 ack_policy = 0; 148 u8 ack_policy = 0, tid;
149
105 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 150 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
151
106 if (unlikely(local->wifi_wme_noack_test)) 152 if (unlikely(local->wifi_wme_noack_test))
107 ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK << 153 ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
108 QOS_CONTROL_ACK_POLICY_SHIFT; 154 QOS_CONTROL_ACK_POLICY_SHIFT;
@@ -110,6 +156,4 @@ void ieee80211_select_queue(struct ieee80211_local *local, struct sk_buff *skb)
110 *p++ = ack_policy | tid; 156 *p++ = ack_policy | tid;
111 *p = 0; 157 *p = 0;
112 } 158 }
113
114 skb_set_queue_mapping(skb, queue);
115} 159}
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index d4fd87ca5118..6053b1c9feee 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -20,7 +20,11 @@
20 20
21extern const int ieee802_1d_to_ac[8]; 21extern const int ieee802_1d_to_ac[8];
22 22
23void ieee80211_select_queue(struct ieee80211_local *local, 23u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
24 struct sk_buff *skb); 24 struct sk_buff *skb);
25void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb);
26u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
27 struct sk_buff *skb);
28
25 29
26#endif /* _WME_H */ 30#endif /* _WME_H */
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
new file mode 100644
index 000000000000..1e1ea3007b06
--- /dev/null
+++ b/net/mac80211/work.c
@@ -0,0 +1,1100 @@
1/*
2 * mac80211 work implementation
3 *
4 * Copyright 2003-2008, Jouni Malinen <j@w1.fi>
5 * Copyright 2004, Instant802 Networks, Inc.
6 * Copyright 2005, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/delay.h>
17#include <linux/if_ether.h>
18#include <linux/skbuff.h>
19#include <linux/if_arp.h>
20#include <linux/etherdevice.h>
21#include <linux/crc32.h>
22#include <net/mac80211.h>
23#include <asm/unaligned.h>
24
25#include "ieee80211_i.h"
26#include "rate.h"
27
28#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
29#define IEEE80211_AUTH_MAX_TRIES 3
30#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
31#define IEEE80211_ASSOC_MAX_TRIES 3
32#define IEEE80211_MAX_PROBE_TRIES 5
33
34enum work_action {
35 WORK_ACT_NONE,
36 WORK_ACT_TIMEOUT,
37 WORK_ACT_DONE,
38};
39
40
41/* utils */
42static inline void ASSERT_WORK_MTX(struct ieee80211_local *local)
43{
44 WARN_ON(!mutex_is_locked(&local->work_mtx));
45}
46
47/*
48 * We can have multiple work items (and connection probing)
49 * scheduling this timer, but we need to take care to only
50 * reschedule it when it should fire _earlier_ than it was
51 * asked for before, or if it's not pending right now. This
52 * function ensures that. Note that it then is required to
53 * run this function for all timeouts after the first one
54 * has happened -- the work that runs from this timer will
55 * do that.
56 */
57static void run_again(struct ieee80211_local *local,
58 unsigned long timeout)
59{
60 ASSERT_WORK_MTX(local);
61
62 if (!timer_pending(&local->work_timer) ||
63 time_before(timeout, local->work_timer.expires))
64 mod_timer(&local->work_timer, timeout);
65}
66
67static void work_free_rcu(struct rcu_head *head)
68{
69 struct ieee80211_work *wk =
70 container_of(head, struct ieee80211_work, rcu_head);
71
72 kfree(wk);
73}
74
75void free_work(struct ieee80211_work *wk)
76{
77 call_rcu(&wk->rcu_head, work_free_rcu);
78}
79
80static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
81 struct ieee80211_supported_band *sband,
82 u32 *rates)
83{
84 int i, j, count;
85 *rates = 0;
86 count = 0;
87 for (i = 0; i < supp_rates_len; i++) {
88 int rate = (supp_rates[i] & 0x7F) * 5;
89
90 for (j = 0; j < sband->n_bitrates; j++)
91 if (sband->bitrates[j].bitrate == rate) {
92 *rates |= BIT(j);
93 count++;
94 break;
95 }
96 }
97
98 return count;
99}
100
101/* frame sending functions */
102
103static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
104 struct ieee80211_supported_band *sband,
105 struct ieee80211_channel *channel,
106 enum ieee80211_smps_mode smps)
107{
108 struct ieee80211_ht_info *ht_info;
109 u8 *pos;
110 u32 flags = channel->flags;
111 u16 cap = sband->ht_cap.cap;
112 __le16 tmp;
113
114 if (!sband->ht_cap.ht_supported)
115 return;
116
117 if (!ht_info_ie)
118 return;
119
120 if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info))
121 return;
122
123 ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2);
124
125 /* determine capability flags */
126
127 if (ieee80211_disable_40mhz_24ghz &&
128 sband->band == IEEE80211_BAND_2GHZ) {
129 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
130 cap &= ~IEEE80211_HT_CAP_SGI_40;
131 }
132
133 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
134 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
135 if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
136 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
137 cap &= ~IEEE80211_HT_CAP_SGI_40;
138 }
139 break;
140 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
141 if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
142 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
143 cap &= ~IEEE80211_HT_CAP_SGI_40;
144 }
145 break;
146 }
147
148 /* set SM PS mode properly */
149 cap &= ~IEEE80211_HT_CAP_SM_PS;
150 switch (smps) {
151 case IEEE80211_SMPS_AUTOMATIC:
152 case IEEE80211_SMPS_NUM_MODES:
153 WARN_ON(1);
154 case IEEE80211_SMPS_OFF:
155 cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
156 IEEE80211_HT_CAP_SM_PS_SHIFT;
157 break;
158 case IEEE80211_SMPS_STATIC:
159 cap |= WLAN_HT_CAP_SM_PS_STATIC <<
160 IEEE80211_HT_CAP_SM_PS_SHIFT;
161 break;
162 case IEEE80211_SMPS_DYNAMIC:
163 cap |= WLAN_HT_CAP_SM_PS_DYNAMIC <<
164 IEEE80211_HT_CAP_SM_PS_SHIFT;
165 break;
166 }
167
168 /* reserve and fill IE */
169
170 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
171 *pos++ = WLAN_EID_HT_CAPABILITY;
172 *pos++ = sizeof(struct ieee80211_ht_cap);
173 memset(pos, 0, sizeof(struct ieee80211_ht_cap));
174
175 /* capability flags */
176 tmp = cpu_to_le16(cap);
177 memcpy(pos, &tmp, sizeof(u16));
178 pos += sizeof(u16);
179
180 /* AMPDU parameters */
181 *pos++ = sband->ht_cap.ampdu_factor |
182 (sband->ht_cap.ampdu_density <<
183 IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
184
185 /* MCS set */
186 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
187 pos += sizeof(sband->ht_cap.mcs);
188
189 /* extended capabilities */
190 pos += sizeof(__le16);
191
192 /* BF capabilities */
193 pos += sizeof(__le32);
194
195 /* antenna selection */
196 pos += sizeof(u8);
197}
198
199static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
200 struct ieee80211_work *wk)
201{
202 struct ieee80211_local *local = sdata->local;
203 struct sk_buff *skb;
204 struct ieee80211_mgmt *mgmt;
205 u8 *pos, qos_info;
206 const u8 *ies;
207 size_t offset = 0, noffset;
208 int i, len, count, rates_len, supp_rates_len;
209 u16 capab;
210 struct ieee80211_supported_band *sband;
211 u32 rates = 0;
212
213 sband = local->hw.wiphy->bands[wk->chan->band];
214
215 /*
216 * Get all rates supported by the device and the AP as
217 * some APs don't like getting a superset of their rates
218 * in the association request (e.g. D-Link DAP 1353 in
219 * b-only mode)...
220 */
221 rates_len = ieee80211_compatible_rates(wk->assoc.supp_rates,
222 wk->assoc.supp_rates_len,
223 sband, &rates);
224
225 skb = alloc_skb(local->hw.extra_tx_headroom +
226 sizeof(*mgmt) + /* bit too much but doesn't matter */
227 2 + wk->assoc.ssid_len + /* SSID */
228 4 + rates_len + /* (extended) rates */
229 4 + /* power capability */
230 2 + 2 * sband->n_channels + /* supported channels */
231 2 + sizeof(struct ieee80211_ht_cap) + /* HT */
232 wk->ie_len + /* extra IEs */
233 9, /* WMM */
234 GFP_KERNEL);
235 if (!skb) {
236 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
237 "frame\n", sdata->name);
238 return;
239 }
240 skb_reserve(skb, local->hw.extra_tx_headroom);
241
242 capab = WLAN_CAPABILITY_ESS;
243
244 if (sband->band == IEEE80211_BAND_2GHZ) {
245 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
246 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
247 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
248 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
249 }
250
251 if (wk->assoc.capability & WLAN_CAPABILITY_PRIVACY)
252 capab |= WLAN_CAPABILITY_PRIVACY;
253
254 if ((wk->assoc.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
255 (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
256 capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
257
258 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
259 memset(mgmt, 0, 24);
260 memcpy(mgmt->da, wk->filter_ta, ETH_ALEN);
261 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
262 memcpy(mgmt->bssid, wk->filter_ta, ETH_ALEN);
263
264 if (!is_zero_ether_addr(wk->assoc.prev_bssid)) {
265 skb_put(skb, 10);
266 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
267 IEEE80211_STYPE_REASSOC_REQ);
268 mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
269 mgmt->u.reassoc_req.listen_interval =
270 cpu_to_le16(local->hw.conf.listen_interval);
271 memcpy(mgmt->u.reassoc_req.current_ap, wk->assoc.prev_bssid,
272 ETH_ALEN);
273 } else {
274 skb_put(skb, 4);
275 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
276 IEEE80211_STYPE_ASSOC_REQ);
277 mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
278 mgmt->u.assoc_req.listen_interval =
279 cpu_to_le16(local->hw.conf.listen_interval);
280 }
281
282 /* SSID */
283 ies = pos = skb_put(skb, 2 + wk->assoc.ssid_len);
284 *pos++ = WLAN_EID_SSID;
285 *pos++ = wk->assoc.ssid_len;
286 memcpy(pos, wk->assoc.ssid, wk->assoc.ssid_len);
287
288 /* add all rates which were marked to be used above */
289 supp_rates_len = rates_len;
290 if (supp_rates_len > 8)
291 supp_rates_len = 8;
292
293 len = sband->n_bitrates;
294 pos = skb_put(skb, supp_rates_len + 2);
295 *pos++ = WLAN_EID_SUPP_RATES;
296 *pos++ = supp_rates_len;
297
298 count = 0;
299 for (i = 0; i < sband->n_bitrates; i++) {
300 if (BIT(i) & rates) {
301 int rate = sband->bitrates[i].bitrate;
302 *pos++ = (u8) (rate / 5);
303 if (++count == 8)
304 break;
305 }
306 }
307
308 if (rates_len > count) {
309 pos = skb_put(skb, rates_len - count + 2);
310 *pos++ = WLAN_EID_EXT_SUPP_RATES;
311 *pos++ = rates_len - count;
312
313 for (i++; i < sband->n_bitrates; i++) {
314 if (BIT(i) & rates) {
315 int rate = sband->bitrates[i].bitrate;
316 *pos++ = (u8) (rate / 5);
317 }
318 }
319 }
320
321 if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
322 /* 1. power capabilities */
323 pos = skb_put(skb, 4);
324 *pos++ = WLAN_EID_PWR_CAPABILITY;
325 *pos++ = 2;
326 *pos++ = 0; /* min tx power */
327 *pos++ = wk->chan->max_power; /* max tx power */
328
329 /* 2. supported channels */
330 /* TODO: get this in reg domain format */
331 pos = skb_put(skb, 2 * sband->n_channels + 2);
332 *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
333 *pos++ = 2 * sband->n_channels;
334 for (i = 0; i < sband->n_channels; i++) {
335 *pos++ = ieee80211_frequency_to_channel(
336 sband->channels[i].center_freq);
337 *pos++ = 1; /* one channel in the subband*/
338 }
339 }
340
341 /* if present, add any custom IEs that go before HT */
342 if (wk->ie_len && wk->ie) {
343 static const u8 before_ht[] = {
344 WLAN_EID_SSID,
345 WLAN_EID_SUPP_RATES,
346 WLAN_EID_EXT_SUPP_RATES,
347 WLAN_EID_PWR_CAPABILITY,
348 WLAN_EID_SUPPORTED_CHANNELS,
349 WLAN_EID_RSN,
350 WLAN_EID_QOS_CAPA,
351 WLAN_EID_RRM_ENABLED_CAPABILITIES,
352 WLAN_EID_MOBILITY_DOMAIN,
353 WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
354 };
355 noffset = ieee80211_ie_split(wk->ie, wk->ie_len,
356 before_ht, ARRAY_SIZE(before_ht),
357 offset);
358 pos = skb_put(skb, noffset - offset);
359 memcpy(pos, wk->ie + offset, noffset - offset);
360 offset = noffset;
361 }
362
363 if (wk->assoc.use_11n && wk->assoc.wmm_used &&
364 local->hw.queues >= 4)
365 ieee80211_add_ht_ie(skb, wk->assoc.ht_information_ie,
366 sband, wk->chan, wk->assoc.smps);
367
368 /* if present, add any custom non-vendor IEs that go after HT */
369 if (wk->ie_len && wk->ie) {
370 noffset = ieee80211_ie_split_vendor(wk->ie, wk->ie_len,
371 offset);
372 pos = skb_put(skb, noffset - offset);
373 memcpy(pos, wk->ie + offset, noffset - offset);
374 offset = noffset;
375 }
376
377 if (wk->assoc.wmm_used && local->hw.queues >= 4) {
378 if (wk->assoc.uapsd_used) {
379 qos_info = local->uapsd_queues;
380 qos_info |= (local->uapsd_max_sp_len <<
381 IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT);
382 } else {
383 qos_info = 0;
384 }
385
386 pos = skb_put(skb, 9);
387 *pos++ = WLAN_EID_VENDOR_SPECIFIC;
388 *pos++ = 7; /* len */
389 *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
390 *pos++ = 0x50;
391 *pos++ = 0xf2;
392 *pos++ = 2; /* WME */
393 *pos++ = 0; /* WME info */
394 *pos++ = 1; /* WME ver */
395 *pos++ = qos_info;
396 }
397
398 /* add any remaining custom (i.e. vendor specific here) IEs */
399 if (wk->ie_len && wk->ie) {
400 noffset = wk->ie_len;
401 pos = skb_put(skb, noffset - offset);
402 memcpy(pos, wk->ie + offset, noffset - offset);
403 }
404
405 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
406 ieee80211_tx_skb(sdata, skb);
407}
408
409static void ieee80211_remove_auth_bss(struct ieee80211_local *local,
410 struct ieee80211_work *wk)
411{
412 struct cfg80211_bss *cbss;
413 u16 capa_val = WLAN_CAPABILITY_ESS;
414
415 if (wk->probe_auth.privacy)
416 capa_val |= WLAN_CAPABILITY_PRIVACY;
417
418 cbss = cfg80211_get_bss(local->hw.wiphy, wk->chan, wk->filter_ta,
419 wk->probe_auth.ssid, wk->probe_auth.ssid_len,
420 WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY,
421 capa_val);
422 if (!cbss)
423 return;
424
425 cfg80211_unlink_bss(local->hw.wiphy, cbss);
426 cfg80211_put_bss(cbss);
427}
428
429static enum work_action __must_check
430ieee80211_direct_probe(struct ieee80211_work *wk)
431{
432 struct ieee80211_sub_if_data *sdata = wk->sdata;
433 struct ieee80211_local *local = sdata->local;
434
435 wk->probe_auth.tries++;
436 if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
437 printk(KERN_DEBUG "%s: direct probe to %pM timed out\n",
438 sdata->name, wk->filter_ta);
439
440 /*
441 * Most likely AP is not in the range so remove the
442 * bss struct for that AP.
443 */
444 ieee80211_remove_auth_bss(local, wk);
445
446 return WORK_ACT_TIMEOUT;
447 }
448
449 printk(KERN_DEBUG "%s: direct probe to %pM (try %d)\n",
450 sdata->name, wk->filter_ta, wk->probe_auth.tries);
451
452 /*
453 * Direct probe is sent to broadcast address as some APs
454 * will not answer to direct packet in unassociated state.
455 */
456 ieee80211_send_probe_req(sdata, NULL, wk->probe_auth.ssid,
457 wk->probe_auth.ssid_len, NULL, 0);
458
459 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
460 run_again(local, wk->timeout);
461
462 return WORK_ACT_NONE;
463}
464
465
466static enum work_action __must_check
467ieee80211_authenticate(struct ieee80211_work *wk)
468{
469 struct ieee80211_sub_if_data *sdata = wk->sdata;
470 struct ieee80211_local *local = sdata->local;
471
472 wk->probe_auth.tries++;
473 if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
474 printk(KERN_DEBUG "%s: authentication with %pM"
475 " timed out\n", sdata->name, wk->filter_ta);
476
477 /*
478 * Most likely AP is not in the range so remove the
479 * bss struct for that AP.
480 */
481 ieee80211_remove_auth_bss(local, wk);
482
483 return WORK_ACT_TIMEOUT;
484 }
485
486 printk(KERN_DEBUG "%s: authenticate with %pM (try %d)\n",
487 sdata->name, wk->filter_ta, wk->probe_auth.tries);
488
489 ieee80211_send_auth(sdata, 1, wk->probe_auth.algorithm, wk->ie,
490 wk->ie_len, wk->filter_ta, NULL, 0, 0);
491 wk->probe_auth.transaction = 2;
492
493 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
494 run_again(local, wk->timeout);
495
496 return WORK_ACT_NONE;
497}
498
499static enum work_action __must_check
500ieee80211_associate(struct ieee80211_work *wk)
501{
502 struct ieee80211_sub_if_data *sdata = wk->sdata;
503 struct ieee80211_local *local = sdata->local;
504
505 wk->assoc.tries++;
506 if (wk->assoc.tries > IEEE80211_ASSOC_MAX_TRIES) {
507 printk(KERN_DEBUG "%s: association with %pM"
508 " timed out\n",
509 sdata->name, wk->filter_ta);
510
511 /*
512 * Most likely AP is not in the range so remove the
513 * bss struct for that AP.
514 */
515 if (wk->assoc.bss)
516 cfg80211_unlink_bss(local->hw.wiphy, wk->assoc.bss);
517
518 return WORK_ACT_TIMEOUT;
519 }
520
521 printk(KERN_DEBUG "%s: associate with %pM (try %d)\n",
522 sdata->name, wk->filter_ta, wk->assoc.tries);
523 ieee80211_send_assoc(sdata, wk);
524
525 wk->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
526 run_again(local, wk->timeout);
527
528 return WORK_ACT_NONE;
529}
530
531static enum work_action __must_check
532ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk)
533{
534 /*
535 * First time we run, do nothing -- the generic code will
536 * have switched to the right channel etc.
537 */
538 if (!wk->started) {
539 wk->timeout = jiffies + msecs_to_jiffies(wk->remain.duration);
540
541 cfg80211_ready_on_channel(wk->sdata->dev, (unsigned long) wk,
542 wk->chan, wk->chan_type,
543 wk->remain.duration, GFP_KERNEL);
544
545 return WORK_ACT_NONE;
546 }
547
548 return WORK_ACT_TIMEOUT;
549}
550
551static void ieee80211_auth_challenge(struct ieee80211_work *wk,
552 struct ieee80211_mgmt *mgmt,
553 size_t len)
554{
555 struct ieee80211_sub_if_data *sdata = wk->sdata;
556 u8 *pos;
557 struct ieee802_11_elems elems;
558
559 pos = mgmt->u.auth.variable;
560 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
561 if (!elems.challenge)
562 return;
563 ieee80211_send_auth(sdata, 3, wk->probe_auth.algorithm,
564 elems.challenge - 2, elems.challenge_len + 2,
565 wk->filter_ta, wk->probe_auth.key,
566 wk->probe_auth.key_len, wk->probe_auth.key_idx);
567 wk->probe_auth.transaction = 4;
568}
569
570static enum work_action __must_check
571ieee80211_rx_mgmt_auth(struct ieee80211_work *wk,
572 struct ieee80211_mgmt *mgmt, size_t len)
573{
574 u16 auth_alg, auth_transaction, status_code;
575
576 if (wk->type != IEEE80211_WORK_AUTH)
577 return WORK_ACT_NONE;
578
579 if (len < 24 + 6)
580 return WORK_ACT_NONE;
581
582 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
583 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
584 status_code = le16_to_cpu(mgmt->u.auth.status_code);
585
586 if (auth_alg != wk->probe_auth.algorithm ||
587 auth_transaction != wk->probe_auth.transaction)
588 return WORK_ACT_NONE;
589
590 if (status_code != WLAN_STATUS_SUCCESS) {
591 printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n",
592 wk->sdata->name, mgmt->sa, status_code);
593 return WORK_ACT_DONE;
594 }
595
596 switch (wk->probe_auth.algorithm) {
597 case WLAN_AUTH_OPEN:
598 case WLAN_AUTH_LEAP:
599 case WLAN_AUTH_FT:
600 break;
601 case WLAN_AUTH_SHARED_KEY:
602 if (wk->probe_auth.transaction != 4) {
603 ieee80211_auth_challenge(wk, mgmt, len);
604 /* need another frame */
605 return WORK_ACT_NONE;
606 }
607 break;
608 default:
609 WARN_ON(1);
610 return WORK_ACT_NONE;
611 }
612
613 printk(KERN_DEBUG "%s: authenticated\n", wk->sdata->name);
614 return WORK_ACT_DONE;
615}
616
617static enum work_action __must_check
618ieee80211_rx_mgmt_assoc_resp(struct ieee80211_work *wk,
619 struct ieee80211_mgmt *mgmt, size_t len,
620 bool reassoc)
621{
622 struct ieee80211_sub_if_data *sdata = wk->sdata;
623 struct ieee80211_local *local = sdata->local;
624 u16 capab_info, status_code, aid;
625 struct ieee802_11_elems elems;
626 u8 *pos;
627
628 /*
629 * AssocResp and ReassocResp have identical structure, so process both
630 * of them in this function.
631 */
632
633 if (len < 24 + 6)
634 return WORK_ACT_NONE;
635
636 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
637 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
638 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
639
640 printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
641 "status=%d aid=%d)\n",
642 sdata->name, reassoc ? "Rea" : "A", mgmt->sa,
643 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
644
645 pos = mgmt->u.assoc_resp.variable;
646 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
647
648 if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
649 elems.timeout_int && elems.timeout_int_len == 5 &&
650 elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) {
651 u32 tu, ms;
652 tu = get_unaligned_le32(elems.timeout_int + 1);
653 ms = tu * 1024 / 1000;
654 printk(KERN_DEBUG "%s: %pM rejected association temporarily; "
655 "comeback duration %u TU (%u ms)\n",
656 sdata->name, mgmt->sa, tu, ms);
657 wk->timeout = jiffies + msecs_to_jiffies(ms);
658 if (ms > IEEE80211_ASSOC_TIMEOUT)
659 run_again(local, wk->timeout);
660 return WORK_ACT_NONE;
661 }
662
663 if (status_code != WLAN_STATUS_SUCCESS)
664 printk(KERN_DEBUG "%s: %pM denied association (code=%d)\n",
665 sdata->name, mgmt->sa, status_code);
666 else
667 printk(KERN_DEBUG "%s: associated\n", sdata->name);
668
669 return WORK_ACT_DONE;
670}
671
672static enum work_action __must_check
673ieee80211_rx_mgmt_probe_resp(struct ieee80211_work *wk,
674 struct ieee80211_mgmt *mgmt, size_t len,
675 struct ieee80211_rx_status *rx_status)
676{
677 struct ieee80211_sub_if_data *sdata = wk->sdata;
678 struct ieee80211_local *local = sdata->local;
679 size_t baselen;
680
681 ASSERT_WORK_MTX(local);
682
683 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
684 if (baselen > len)
685 return WORK_ACT_NONE;
686
687 printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name);
688 return WORK_ACT_DONE;
689}
690
/*
 * Process one management frame queued by ieee80211_work_rx_mgmt():
 * match it to the single work item it belongs to, dispatch it to the
 * per-subtype handler, and if the handler declares the work complete,
 * unlink the item and run its done() callback.  The skb is always
 * consumed (freed) here.
 */
static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
					  struct sk_buff *skb)
{
	struct ieee80211_rx_status *rx_status;
	struct ieee80211_mgmt *mgmt;
	struct ieee80211_work *wk;
	enum work_action rma = WORK_ACT_NONE;
	u16 fc;

	rx_status = (struct ieee80211_rx_status *) skb->cb;
	mgmt = (struct ieee80211_mgmt *) skb->data;
	fc = le16_to_cpu(mgmt->frame_control);

	mutex_lock(&local->work_mtx);

	list_for_each_entry(wk, &local->work_list, list) {
		const u8 *bssid = NULL;

		/* only these work types consume mgmt frames */
		switch (wk->type) {
		case IEEE80211_WORK_DIRECT_PROBE:
		case IEEE80211_WORK_AUTH:
		case IEEE80211_WORK_ASSOC:
			bssid = wk->filter_ta;
			break;
		default:
			continue;
		}

		/*
		 * Before queuing, we already verified mgmt->sa,
		 * so this is needed just for matching.
		 */
		if (compare_ether_addr(bssid, mgmt->bssid))
			continue;

		switch (fc & IEEE80211_FCTL_STYPE) {
		case IEEE80211_STYPE_PROBE_RESP:
			rma = ieee80211_rx_mgmt_probe_resp(wk, mgmt, skb->len,
							   rx_status);
			break;
		case IEEE80211_STYPE_AUTH:
			rma = ieee80211_rx_mgmt_auth(wk, mgmt, skb->len);
			break;
		case IEEE80211_STYPE_ASSOC_RESP:
			rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt,
							   skb->len, false);
			break;
		case IEEE80211_STYPE_REASSOC_RESP:
			rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt,
							   skb->len, true);
			break;
		default:
			/* only the subtypes above are ever queued */
			WARN_ON(1);
		}
		/*
		 * We've processed this frame for that work, so it can't
		 * belong to another work struct.
		 * NB: this is also required for correctness for 'rma'!
		 */
		break;
	}

	switch (rma) {
	case WORK_ACT_NONE:
		break;
	case WORK_ACT_DONE:
		/* RCU removal; readers may still see the entry briefly */
		list_del_rcu(&wk->list);
		break;
	default:
		WARN(1, "unexpected: %d", rma);
	}

	mutex_unlock(&local->work_mtx);

	/* 'wk' below is only valid when the loop above matched (ACT_DONE) */
	if (rma != WORK_ACT_DONE)
		goto out;

	switch (wk->done(wk, skb)) {
	case WORK_DONE_DESTROY:
		free_work(wk);
		break;
	case WORK_DONE_REQUEUE:
		/* wait for RCU readers of the old list linkage first */
		synchronize_rcu();
		wk->started = false; /* restart */
		mutex_lock(&local->work_mtx);
		list_add_tail(&wk->list, &local->work_list);
		mutex_unlock(&local->work_mtx);
	}

 out:
	kfree_skb(skb);
}
783
784static void ieee80211_work_timer(unsigned long data)
785{
786 struct ieee80211_local *local = (void *) data;
787
788 if (local->quiescing)
789 return;
790
791 ieee80211_queue_work(&local->hw, &local->work_work);
792}
793
/*
 * Main work-list processing function, run from the workqueue.
 *
 * Order matters throughout: queued frames are handled first (so a
 * pending response cannot time out while it sits in the queue), then
 * each started/startable item is driven through its state machine,
 * the off-channel state is reconciled, and finally timed-out items
 * collected on a private list have their done() callbacks invoked
 * outside of work_mtx.
 */
static void ieee80211_work_work(struct work_struct *work)
{
	struct ieee80211_local *local =
		container_of(work, struct ieee80211_local, work_work);
	struct sk_buff *skb;
	struct ieee80211_work *wk, *tmp;
	LIST_HEAD(free_work);
	enum work_action rma;
	bool remain_off_channel = false;

	/* defer to the scan; work resumes when scanning completes */
	if (local->scanning)
		return;

	/*
	 * ieee80211_queue_work() should have picked up most cases,
	 * here we'll pick the the rest.
	 */
	if (WARN(local->suspended, "work scheduled while going to suspend\n"))
		return;

	/* first process frames to avoid timing out while a frame is pending */
	while ((skb = skb_dequeue(&local->work_skb_queue)))
		ieee80211_work_rx_queued_mgmt(local, skb);

	ieee80211_recalc_idle(local);

	mutex_lock(&local->work_mtx);

	list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
		bool started = wk->started;

		/* mark work as started if it's on the current off-channel */
		if (!started && local->tmp_channel &&
		    wk->chan == local->tmp_channel &&
		    wk->chan_type == local->tmp_channel_type) {
			started = true;
			wk->timeout = jiffies;
		}

		if (!started && !local->tmp_channel) {
			/*
			 * TODO: could optimize this by leaving the
			 *	 station vifs in awake mode if they
			 *	 happen to be on the same channel as
			 *	 the requested channel
			 */
			ieee80211_offchannel_stop_beaconing(local);
			ieee80211_offchannel_stop_station(local);

			/* switch to this item's channel */
			local->tmp_channel = wk->chan;
			local->tmp_channel_type = wk->chan_type;
			ieee80211_hw_config(local, 0);
			started = true;
			wk->timeout = jiffies;
		}

		/* don't try to work with items that aren't started */
		if (!started)
			continue;

		if (time_is_after_jiffies(wk->timeout)) {
			/*
			 * This work item isn't supposed to be worked on
			 * right now, but take care to adjust the timer
			 * properly.
			 */
			run_again(local, wk->timeout);
			continue;
		}

		/* advance this item's state machine */
		switch (wk->type) {
		default:
			WARN_ON(1);
			/* nothing */
			rma = WORK_ACT_NONE;
			break;
		case IEEE80211_WORK_ABORT:
			rma = WORK_ACT_TIMEOUT;
			break;
		case IEEE80211_WORK_DIRECT_PROBE:
			rma = ieee80211_direct_probe(wk);
			break;
		case IEEE80211_WORK_AUTH:
			rma = ieee80211_authenticate(wk);
			break;
		case IEEE80211_WORK_ASSOC:
			rma = ieee80211_associate(wk);
			break;
		case IEEE80211_WORK_REMAIN_ON_CHANNEL:
			rma = ieee80211_remain_on_channel_timeout(wk);
			break;
		}

		wk->started = started;

		switch (rma) {
		case WORK_ACT_NONE:
			/* might have changed the timeout */
			run_again(local, wk->timeout);
			break;
		case WORK_ACT_TIMEOUT:
			/* move to private list; done() runs after unlock */
			list_del_rcu(&wk->list);
			synchronize_rcu();
			list_add(&wk->list, &free_work);
			break;
		default:
			WARN(1, "unexpected: %d", rma);
		}
	}

	/* does any started item still need the current off-channel? */
	list_for_each_entry(wk, &local->work_list, list) {
		if (!wk->started)
			continue;
		if (wk->chan != local->tmp_channel)
			continue;
		if (wk->chan_type != local->tmp_channel_type)
			continue;
		remain_off_channel = true;
	}

	if (!remain_off_channel && local->tmp_channel) {
		/* return to the operating channel */
		local->tmp_channel = NULL;
		ieee80211_hw_config(local, 0);
		ieee80211_offchannel_return(local, true);
		/* give connection some time to breathe */
		run_again(local, jiffies + HZ/2);
	}

	/* kick off a pending scan once all work has drained */
	if (list_empty(&local->work_list) && local->scan_req)
		ieee80211_queue_delayed_work(&local->hw,
					     &local->scan_work,
					     round_jiffies_relative(0));

	mutex_unlock(&local->work_mtx);

	ieee80211_recalc_idle(local);

	/* invoke done() for timed-out items without holding work_mtx */
	list_for_each_entry_safe(wk, tmp, &free_work, list) {
		wk->done(wk, NULL);
		list_del(&wk->list);
		kfree(wk);
	}
}
937
938void ieee80211_add_work(struct ieee80211_work *wk)
939{
940 struct ieee80211_local *local;
941
942 if (WARN_ON(!wk->chan))
943 return;
944
945 if (WARN_ON(!wk->sdata))
946 return;
947
948 if (WARN_ON(!wk->done))
949 return;
950
951 if (WARN_ON(!ieee80211_sdata_running(wk->sdata)))
952 return;
953
954 wk->started = false;
955
956 local = wk->sdata->local;
957 mutex_lock(&local->work_mtx);
958 list_add_tail(&wk->list, &local->work_list);
959 mutex_unlock(&local->work_mtx);
960
961 ieee80211_queue_work(&local->hw, &local->work_work);
962}
963
964void ieee80211_work_init(struct ieee80211_local *local)
965{
966 mutex_init(&local->work_mtx);
967 INIT_LIST_HEAD(&local->work_list);
968 setup_timer(&local->work_timer, ieee80211_work_timer,
969 (unsigned long)local);
970 INIT_WORK(&local->work_work, ieee80211_work_work);
971 skb_queue_head_init(&local->work_skb_queue);
972}
973
/*
 * Abort and flush all work items belonging to @sdata, e.g. when the
 * interface goes down.  Items are converted to ABORT type with an
 * immediate timeout, the work function is run synchronously to reap
 * them, and a final pass warns if anything survived.
 */
void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_work *wk;

	mutex_lock(&local->work_mtx);
	list_for_each_entry(wk, &local->work_list, list) {
		if (wk->sdata != sdata)
			continue;
		/* force the item into the timed-out/abort path */
		wk->type = IEEE80211_WORK_ABORT;
		wk->started = true;
		wk->timeout = jiffies;
	}
	mutex_unlock(&local->work_mtx);

	/* run cleanups etc. */
	ieee80211_work_work(&local->work_work);

	/* all items for this sdata should be gone now; warn otherwise */
	mutex_lock(&local->work_mtx);
	list_for_each_entry(wk, &local->work_list, list) {
		if (wk->sdata != sdata)
			continue;
		WARN_ON(1);
		break;
	}
	mutex_unlock(&local->work_mtx);
}
1001
1002ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
1003 struct sk_buff *skb)
1004{
1005 struct ieee80211_local *local = sdata->local;
1006 struct ieee80211_mgmt *mgmt;
1007 struct ieee80211_work *wk;
1008 u16 fc;
1009
1010 if (skb->len < 24)
1011 return RX_DROP_MONITOR;
1012
1013 mgmt = (struct ieee80211_mgmt *) skb->data;
1014 fc = le16_to_cpu(mgmt->frame_control);
1015
1016 list_for_each_entry_rcu(wk, &local->work_list, list) {
1017 if (sdata != wk->sdata)
1018 continue;
1019 if (compare_ether_addr(wk->filter_ta, mgmt->sa))
1020 continue;
1021 if (compare_ether_addr(wk->filter_ta, mgmt->bssid))
1022 continue;
1023
1024 switch (fc & IEEE80211_FCTL_STYPE) {
1025 case IEEE80211_STYPE_AUTH:
1026 case IEEE80211_STYPE_PROBE_RESP:
1027 case IEEE80211_STYPE_ASSOC_RESP:
1028 case IEEE80211_STYPE_REASSOC_RESP:
1029 skb_queue_tail(&local->work_skb_queue, skb);
1030 ieee80211_queue_work(&local->hw, &local->work_work);
1031 return RX_QUEUED;
1032 }
1033 }
1034
1035 return RX_CONTINUE;
1036}
1037
1038static enum work_done_result ieee80211_remain_done(struct ieee80211_work *wk,
1039 struct sk_buff *skb)
1040{
1041 /*
1042 * We are done serving the remain-on-channel command.
1043 */
1044 cfg80211_remain_on_channel_expired(wk->sdata->dev, (unsigned long) wk,
1045 wk->chan, wk->chan_type,
1046 GFP_KERNEL);
1047
1048 return WORK_DONE_DESTROY;
1049}
1050
1051int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
1052 struct ieee80211_channel *chan,
1053 enum nl80211_channel_type channel_type,
1054 unsigned int duration, u64 *cookie)
1055{
1056 struct ieee80211_work *wk;
1057
1058 wk = kzalloc(sizeof(*wk), GFP_KERNEL);
1059 if (!wk)
1060 return -ENOMEM;
1061
1062 wk->type = IEEE80211_WORK_REMAIN_ON_CHANNEL;
1063 wk->chan = chan;
1064 wk->chan_type = channel_type;
1065 wk->sdata = sdata;
1066 wk->done = ieee80211_remain_done;
1067
1068 wk->remain.duration = duration;
1069
1070 *cookie = (unsigned long) wk;
1071
1072 ieee80211_add_work(wk);
1073
1074 return 0;
1075}
1076
1077int ieee80211_wk_cancel_remain_on_channel(struct ieee80211_sub_if_data *sdata,
1078 u64 cookie)
1079{
1080 struct ieee80211_local *local = sdata->local;
1081 struct ieee80211_work *wk, *tmp;
1082 bool found = false;
1083
1084 mutex_lock(&local->work_mtx);
1085 list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
1086 if ((unsigned long) wk == cookie) {
1087 wk->timeout = jiffies;
1088 found = true;
1089 break;
1090 }
1091 }
1092 mutex_unlock(&local->work_mtx);
1093
1094 if (!found)
1095 return -ENOENT;
1096
1097 ieee80211_queue_work(&local->hw, &local->work_work);
1098
1099 return 0;
1100}
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 5332014cb229..f4971cd45c64 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -31,8 +31,8 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
31 unsigned int hdrlen; 31 unsigned int hdrlen;
32 struct ieee80211_hdr *hdr; 32 struct ieee80211_hdr *hdr;
33 struct sk_buff *skb = tx->skb; 33 struct sk_buff *skb = tx->skb;
34 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
34 int authenticator; 35 int authenticator;
35 int wpa_test = 0;
36 int tail; 36 int tail;
37 37
38 hdr = (struct ieee80211_hdr *)skb->data; 38 hdr = (struct ieee80211_hdr *)skb->data;
@@ -47,16 +47,15 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
47 data = skb->data + hdrlen; 47 data = skb->data + hdrlen;
48 data_len = skb->len - hdrlen; 48 data_len = skb->len - hdrlen;
49 49
50 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && 50 if (info->control.hw_key &&
51 !(tx->flags & IEEE80211_TX_FRAGMENTED) && 51 !(tx->flags & IEEE80211_TX_FRAGMENTED) &&
52 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) && 52 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
53 !wpa_test) { 53 /* hwaccel - with no need for SW-generated MMIC */
54 /* hwaccel - with no need for preallocated room for MMIC */
55 return TX_CONTINUE; 54 return TX_CONTINUE;
56 } 55 }
57 56
58 tail = MICHAEL_MIC_LEN; 57 tail = MICHAEL_MIC_LEN;
59 if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) 58 if (!info->control.hw_key)
60 tail += TKIP_ICV_LEN; 59 tail += TKIP_ICV_LEN;
61 60
62 if (WARN_ON(skb_tailroom(skb) < tail || 61 if (WARN_ON(skb_tailroom(skb) < tail ||
@@ -147,17 +146,16 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
147 int len, tail; 146 int len, tail;
148 u8 *pos; 147 u8 *pos;
149 148
150 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && 149 if (info->control.hw_key &&
151 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { 150 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
152 /* hwaccel - with no need for preallocated room for IV/ICV */ 151 /* hwaccel - with no need for software-generated IV */
153 info->control.hw_key = &tx->key->conf;
154 return 0; 152 return 0;
155 } 153 }
156 154
157 hdrlen = ieee80211_hdrlen(hdr->frame_control); 155 hdrlen = ieee80211_hdrlen(hdr->frame_control);
158 len = skb->len - hdrlen; 156 len = skb->len - hdrlen;
159 157
160 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) 158 if (info->control.hw_key)
161 tail = 0; 159 tail = 0;
162 else 160 else
163 tail = TKIP_ICV_LEN; 161 tail = TKIP_ICV_LEN;
@@ -175,13 +173,11 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
175 if (key->u.tkip.tx.iv16 == 0) 173 if (key->u.tkip.tx.iv16 == 0)
176 key->u.tkip.tx.iv32++; 174 key->u.tkip.tx.iv32++;
177 175
178 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 176 pos = ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16);
179 /* hwaccel - with preallocated room for IV */
180 ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16);
181 177
182 info->control.hw_key = &tx->key->conf; 178 /* hwaccel - with software IV */
179 if (info->control.hw_key)
183 return 0; 180 return 0;
184 }
185 181
186 /* Add room for ICV */ 182 /* Add room for ICV */
187 skb_put(skb, TKIP_ICV_LEN); 183 skb_put(skb, TKIP_ICV_LEN);
@@ -363,24 +359,20 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
363 int hdrlen, len, tail; 359 int hdrlen, len, tail;
364 u8 *pos, *pn; 360 u8 *pos, *pn;
365 int i; 361 int i;
366 bool skip_hw;
367
368 skip_hw = (tx->key->conf.flags & IEEE80211_KEY_FLAG_SW_MGMT) &&
369 ieee80211_is_mgmt(hdr->frame_control);
370 362
371 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && 363 if (info->control.hw_key &&
372 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) && 364 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
373 !skip_hw) { 365 /*
374 /* hwaccel - with no need for preallocated room for CCMP 366 * hwaccel has no need for preallocated room for CCMP
375 * header or MIC fields */ 367 * header or MIC fields
376 info->control.hw_key = &tx->key->conf; 368 */
377 return 0; 369 return 0;
378 } 370 }
379 371
380 hdrlen = ieee80211_hdrlen(hdr->frame_control); 372 hdrlen = ieee80211_hdrlen(hdr->frame_control);
381 len = skb->len - hdrlen; 373 len = skb->len - hdrlen;
382 374
383 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) 375 if (info->control.hw_key)
384 tail = 0; 376 tail = 0;
385 else 377 else
386 tail = CCMP_MIC_LEN; 378 tail = CCMP_MIC_LEN;
@@ -405,11 +397,9 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
405 397
406 ccmp_pn2hdr(pos, pn, key->conf.keyidx); 398 ccmp_pn2hdr(pos, pn, key->conf.keyidx);
407 399
408 if ((key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && !skip_hw) { 400 /* hwaccel - with software CCMP header */
409 /* hwaccel - with preallocated room for CCMP header */ 401 if (info->control.hw_key)
410 info->control.hw_key = &tx->key->conf;
411 return 0; 402 return 0;
412 }
413 403
414 pos += CCMP_HDR_LEN; 404 pos += CCMP_HDR_LEN;
415 ccmp_special_blocks(skb, pn, key->u.ccmp.tx_crypto_buf, 0); 405 ccmp_special_blocks(skb, pn, key->u.ccmp.tx_crypto_buf, 0);
@@ -525,11 +515,8 @@ ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
525 u8 *pn, aad[20]; 515 u8 *pn, aad[20];
526 int i; 516 int i;
527 517
528 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 518 if (info->control.hw_key)
529 /* hwaccel */
530 info->control.hw_key = &tx->key->conf;
531 return 0; 519 return 0;
532 }
533 520
534 if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) 521 if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
535 return TX_DROP; 522 return TX_DROP;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 634d14affc8d..18d77b5c351a 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -83,6 +83,19 @@ config NF_CONNTRACK_SECMARK
83 83
84 If unsure, say 'N'. 84 If unsure, say 'N'.
85 85
86config NF_CONNTRACK_ZONES
87 bool 'Connection tracking zones'
88 depends on NETFILTER_ADVANCED
89 depends on NETFILTER_XT_TARGET_CT
90 help
91 This option enables support for connection tracking zones.
92 Normally, each connection needs to have a unique system wide
93 identity. Connection tracking zones allow to have multiple
94 connections using the same identity, as long as they are
95 contained in different zones.
96
97 If unsure, say `N'.
98
86config NF_CONNTRACK_EVENTS 99config NF_CONNTRACK_EVENTS
87 bool "Connection tracking events" 100 bool "Connection tracking events"
88 depends on NETFILTER_ADVANCED 101 depends on NETFILTER_ADVANCED
@@ -341,6 +354,18 @@ config NETFILTER_XT_TARGET_CONNSECMARK
341 354
342 To compile it as a module, choose M here. If unsure, say N. 355 To compile it as a module, choose M here. If unsure, say N.
343 356
357config NETFILTER_XT_TARGET_CT
358 tristate '"CT" target support'
359 depends on NF_CONNTRACK
360 depends on IP_NF_RAW || IP6_NF_RAW
361 depends on NETFILTER_ADVANCED
362 help
363 This options adds a `CT' target, which allows to specify initial
364 connection tracking parameters like events to be delivered and
365 the helper to be used.
366
367 To compile it as a module, choose M here. If unsure, say N.
368
344config NETFILTER_XT_TARGET_DSCP 369config NETFILTER_XT_TARGET_DSCP
345 tristate '"DSCP" and "TOS" target support' 370 tristate '"DSCP" and "TOS" target support'
346 depends on IP_NF_MANGLE || IP6_NF_MANGLE 371 depends on IP_NF_MANGLE || IP6_NF_MANGLE
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 49f62ee4e9ff..f873644f02f6 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -44,6 +44,7 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
44obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o 44obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
45obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o 45obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o
46obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o 46obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
47obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
47obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o 48obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
48obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o 49obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
49obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o 50obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index 79a698052218..712ccad13344 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -68,6 +68,10 @@ config IP_VS_TAB_BITS
68 each hash entry uses 8 bytes, so you can estimate how much memory is 68 each hash entry uses 8 bytes, so you can estimate how much memory is
69 needed for your box. 69 needed for your box.
70 70
71 You can overwrite this number setting conn_tab_bits module parameter
72 or by appending ip_vs.conn_tab_bits=? to the kernel command line
73 if IP VS was compiled built-in.
74
71comment "IPVS transport protocol load balancing support" 75comment "IPVS transport protocol load balancing support"
72 76
73config IP_VS_PROTO_TCP 77config IP_VS_PROTO_TCP
@@ -100,6 +104,13 @@ config IP_VS_PROTO_AH
100 This option enables support for load balancing AH (Authentication 104 This option enables support for load balancing AH (Authentication
101 Header) transport protocol. Say Y if unsure. 105 Header) transport protocol. Say Y if unsure.
102 106
107config IP_VS_PROTO_SCTP
108 bool "SCTP load balancing support"
109 select LIBCRC32C
110 ---help---
111 This option enables support for load balancing SCTP transport
112 protocol. Say Y if unsure.
113
103comment "IPVS scheduler" 114comment "IPVS scheduler"
104 115
105config IP_VS_RR 116config IP_VS_RR
@@ -112,7 +123,8 @@ config IP_VS_RR
112 module, choose M here. If unsure, say N. 123 module, choose M here. If unsure, say N.
113 124
114config IP_VS_WRR 125config IP_VS_WRR
115 tristate "weighted round-robin scheduling" 126 tristate "weighted round-robin scheduling"
127 select GCD
116 ---help--- 128 ---help---
117 The weighted robin-robin scheduling algorithm directs network 129 The weighted robin-robin scheduling algorithm directs network
118 connections to different real servers based on server weights 130 connections to different real servers based on server weights
diff --git a/net/netfilter/ipvs/Makefile b/net/netfilter/ipvs/Makefile
index 73a46fe1fe4c..e3baefd7066e 100644
--- a/net/netfilter/ipvs/Makefile
+++ b/net/netfilter/ipvs/Makefile
@@ -7,6 +7,7 @@ ip_vs_proto-objs-y :=
7ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_TCP) += ip_vs_proto_tcp.o 7ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_TCP) += ip_vs_proto_tcp.o
8ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_UDP) += ip_vs_proto_udp.o 8ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_UDP) += ip_vs_proto_udp.o
9ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH_ESP) += ip_vs_proto_ah_esp.o 9ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH_ESP) += ip_vs_proto_ah_esp.o
10ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_SCTP) += ip_vs_proto_sctp.o
10 11
11ip_vs-objs := ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \ 12ip_vs-objs := ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \
12 ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \ 13 ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 27c30cf933da..60bb41a8d8d4 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -40,6 +40,21 @@
40#include <net/ip_vs.h> 40#include <net/ip_vs.h>
41 41
42 42
43#ifndef CONFIG_IP_VS_TAB_BITS
44#define CONFIG_IP_VS_TAB_BITS 12
45#endif
46
47/*
48 * Connection hash size. Default is what was selected at compile time.
49*/
50int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
51module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444);
52MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size");
53
54/* size and mask values */
55int ip_vs_conn_tab_size;
56int ip_vs_conn_tab_mask;
57
43/* 58/*
44 * Connection hash table: for input and output packets lookups of IPVS 59 * Connection hash table: for input and output packets lookups of IPVS
45 */ 60 */
@@ -125,11 +140,11 @@ static unsigned int ip_vs_conn_hashkey(int af, unsigned proto,
125 if (af == AF_INET6) 140 if (af == AF_INET6)
126 return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd), 141 return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
127 (__force u32)port, proto, ip_vs_conn_rnd) 142 (__force u32)port, proto, ip_vs_conn_rnd)
128 & IP_VS_CONN_TAB_MASK; 143 & ip_vs_conn_tab_mask;
129#endif 144#endif
130 return jhash_3words((__force u32)addr->ip, (__force u32)port, proto, 145 return jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
131 ip_vs_conn_rnd) 146 ip_vs_conn_rnd)
132 & IP_VS_CONN_TAB_MASK; 147 & ip_vs_conn_tab_mask;
133} 148}
134 149
135 150
@@ -760,7 +775,7 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
760 int idx; 775 int idx;
761 struct ip_vs_conn *cp; 776 struct ip_vs_conn *cp;
762 777
763 for(idx = 0; idx < IP_VS_CONN_TAB_SIZE; idx++) { 778 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
764 ct_read_lock_bh(idx); 779 ct_read_lock_bh(idx);
765 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { 780 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
766 if (pos-- == 0) { 781 if (pos-- == 0) {
@@ -797,7 +812,7 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
797 idx = l - ip_vs_conn_tab; 812 idx = l - ip_vs_conn_tab;
798 ct_read_unlock_bh(idx); 813 ct_read_unlock_bh(idx);
799 814
800 while (++idx < IP_VS_CONN_TAB_SIZE) { 815 while (++idx < ip_vs_conn_tab_size) {
801 ct_read_lock_bh(idx); 816 ct_read_lock_bh(idx);
802 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { 817 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
803 seq->private = &ip_vs_conn_tab[idx]; 818 seq->private = &ip_vs_conn_tab[idx];
@@ -976,8 +991,8 @@ void ip_vs_random_dropentry(void)
976 /* 991 /*
977 * Randomly scan 1/32 of the whole table every second 992 * Randomly scan 1/32 of the whole table every second
978 */ 993 */
979 for (idx = 0; idx < (IP_VS_CONN_TAB_SIZE>>5); idx++) { 994 for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
980 unsigned hash = net_random() & IP_VS_CONN_TAB_MASK; 995 unsigned hash = net_random() & ip_vs_conn_tab_mask;
981 996
982 /* 997 /*
983 * Lock is actually needed in this loop. 998 * Lock is actually needed in this loop.
@@ -1029,7 +1044,7 @@ static void ip_vs_conn_flush(void)
1029 struct ip_vs_conn *cp; 1044 struct ip_vs_conn *cp;
1030 1045
1031 flush_again: 1046 flush_again:
1032 for (idx=0; idx<IP_VS_CONN_TAB_SIZE; idx++) { 1047 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
1033 /* 1048 /*
1034 * Lock is actually needed in this loop. 1049 * Lock is actually needed in this loop.
1035 */ 1050 */
@@ -1060,10 +1075,15 @@ int __init ip_vs_conn_init(void)
1060{ 1075{
1061 int idx; 1076 int idx;
1062 1077
1078 /* Compute size and mask */
1079 ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
1080 ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
1081
1063 /* 1082 /*
1064 * Allocate the connection hash table and initialize its list heads 1083 * Allocate the connection hash table and initialize its list heads
1065 */ 1084 */
1066 ip_vs_conn_tab = vmalloc(IP_VS_CONN_TAB_SIZE*sizeof(struct list_head)); 1085 ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size *
1086 sizeof(struct list_head));
1067 if (!ip_vs_conn_tab) 1087 if (!ip_vs_conn_tab)
1068 return -ENOMEM; 1088 return -ENOMEM;
1069 1089
@@ -1078,12 +1098,12 @@ int __init ip_vs_conn_init(void)
1078 1098
1079 pr_info("Connection hash table configured " 1099 pr_info("Connection hash table configured "
1080 "(size=%d, memory=%ldKbytes)\n", 1100 "(size=%d, memory=%ldKbytes)\n",
1081 IP_VS_CONN_TAB_SIZE, 1101 ip_vs_conn_tab_size,
1082 (long)(IP_VS_CONN_TAB_SIZE*sizeof(struct list_head))/1024); 1102 (long)(ip_vs_conn_tab_size*sizeof(struct list_head))/1024);
1083 IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n", 1103 IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n",
1084 sizeof(struct ip_vs_conn)); 1104 sizeof(struct ip_vs_conn));
1085 1105
1086 for (idx = 0; idx < IP_VS_CONN_TAB_SIZE; idx++) { 1106 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
1087 INIT_LIST_HEAD(&ip_vs_conn_tab[idx]); 1107 INIT_LIST_HEAD(&ip_vs_conn_tab[idx]);
1088 } 1108 }
1089 1109
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index b95699f00545..44590887a92c 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -31,6 +31,7 @@
31#include <linux/kernel.h> 31#include <linux/kernel.h>
32#include <linux/ip.h> 32#include <linux/ip.h>
33#include <linux/tcp.h> 33#include <linux/tcp.h>
34#include <linux/sctp.h>
34#include <linux/icmp.h> 35#include <linux/icmp.h>
35 36
36#include <net/ip.h> 37#include <net/ip.h>
@@ -81,6 +82,8 @@ const char *ip_vs_proto_name(unsigned proto)
81 return "UDP"; 82 return "UDP";
82 case IPPROTO_TCP: 83 case IPPROTO_TCP:
83 return "TCP"; 84 return "TCP";
85 case IPPROTO_SCTP:
86 return "SCTP";
84 case IPPROTO_ICMP: 87 case IPPROTO_ICMP:
85 return "ICMP"; 88 return "ICMP";
86#ifdef CONFIG_IP_VS_IPV6 89#ifdef CONFIG_IP_VS_IPV6
@@ -512,8 +515,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
512 */ 515 */
513#ifdef CONFIG_IP_VS_IPV6 516#ifdef CONFIG_IP_VS_IPV6
514 if (svc->af == AF_INET6) 517 if (svc->af == AF_INET6)
515 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, 518 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
516 skb->dev);
517 else 519 else
518#endif 520#endif
519 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); 521 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
@@ -589,8 +591,9 @@ void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
589 ip_send_check(ciph); 591 ip_send_check(ciph);
590 } 592 }
591 593
592 /* the TCP/UDP port */ 594 /* the TCP/UDP/SCTP port */
593 if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol) { 595 if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol ||
596 IPPROTO_SCTP == ciph->protocol) {
594 __be16 *ports = (void *)ciph + ciph->ihl*4; 597 __be16 *ports = (void *)ciph + ciph->ihl*4;
595 598
596 if (inout) 599 if (inout)
@@ -630,8 +633,9 @@ void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
630 ciph->saddr = cp->daddr.in6; 633 ciph->saddr = cp->daddr.in6;
631 } 634 }
632 635
633 /* the TCP/UDP port */ 636 /* the TCP/UDP/SCTP port */
634 if (IPPROTO_TCP == ciph->nexthdr || IPPROTO_UDP == ciph->nexthdr) { 637 if (IPPROTO_TCP == ciph->nexthdr || IPPROTO_UDP == ciph->nexthdr ||
638 IPPROTO_SCTP == ciph->nexthdr) {
635 __be16 *ports = (void *)ciph + sizeof(struct ipv6hdr); 639 __be16 *ports = (void *)ciph + sizeof(struct ipv6hdr);
636 640
637 if (inout) 641 if (inout)
@@ -679,7 +683,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
679 goto out; 683 goto out;
680 } 684 }
681 685
682 if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol) 686 if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
687 IPPROTO_SCTP == protocol)
683 offset += 2 * sizeof(__u16); 688 offset += 2 * sizeof(__u16);
684 if (!skb_make_writable(skb, offset)) 689 if (!skb_make_writable(skb, offset))
685 goto out; 690 goto out;
@@ -857,6 +862,21 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related)
857} 862}
858#endif 863#endif
859 864
865/*
866 * Check if sctp chunc is ABORT chunk
867 */
868static inline int is_sctp_abort(const struct sk_buff *skb, int nh_len)
869{
870 sctp_chunkhdr_t *sch, schunk;
871 sch = skb_header_pointer(skb, nh_len + sizeof(sctp_sctphdr_t),
872 sizeof(schunk), &schunk);
873 if (sch == NULL)
874 return 0;
875 if (sch->type == SCTP_CID_ABORT)
876 return 1;
877 return 0;
878}
879
860static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len) 880static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
861{ 881{
862 struct tcphdr _tcph, *th; 882 struct tcphdr _tcph, *th;
@@ -999,7 +1019,8 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb,
999 if (unlikely(!cp)) { 1019 if (unlikely(!cp)) {
1000 if (sysctl_ip_vs_nat_icmp_send && 1020 if (sysctl_ip_vs_nat_icmp_send &&
1001 (pp->protocol == IPPROTO_TCP || 1021 (pp->protocol == IPPROTO_TCP ||
1002 pp->protocol == IPPROTO_UDP)) { 1022 pp->protocol == IPPROTO_UDP ||
1023 pp->protocol == IPPROTO_SCTP)) {
1003 __be16 _ports[2], *pptr; 1024 __be16 _ports[2], *pptr;
1004 1025
1005 pptr = skb_header_pointer(skb, iph.len, 1026 pptr = skb_header_pointer(skb, iph.len,
@@ -1014,14 +1035,19 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb,
1014 * existing entry if it is not RST 1035 * existing entry if it is not RST
1015 * packet or not TCP packet. 1036 * packet or not TCP packet.
1016 */ 1037 */
1017 if (iph.protocol != IPPROTO_TCP 1038 if ((iph.protocol != IPPROTO_TCP &&
1018 || !is_tcp_reset(skb, iph.len)) { 1039 iph.protocol != IPPROTO_SCTP)
1040 || ((iph.protocol == IPPROTO_TCP
1041 && !is_tcp_reset(skb, iph.len))
1042 || (iph.protocol == IPPROTO_SCTP
1043 && !is_sctp_abort(skb,
1044 iph.len)))) {
1019#ifdef CONFIG_IP_VS_IPV6 1045#ifdef CONFIG_IP_VS_IPV6
1020 if (af == AF_INET6) 1046 if (af == AF_INET6)
1021 icmpv6_send(skb, 1047 icmpv6_send(skb,
1022 ICMPV6_DEST_UNREACH, 1048 ICMPV6_DEST_UNREACH,
1023 ICMPV6_PORT_UNREACH, 1049 ICMPV6_PORT_UNREACH,
1024 0, skb->dev); 1050 0);
1025 else 1051 else
1026#endif 1052#endif
1027 icmp_send(skb, 1053 icmp_send(skb,
@@ -1235,7 +1261,8 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
1235 1261
1236 /* do the statistics and put it back */ 1262 /* do the statistics and put it back */
1237 ip_vs_in_stats(cp, skb); 1263 ip_vs_in_stats(cp, skb);
1238 if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr) 1264 if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr ||
1265 IPPROTO_SCTP == cih->nexthdr)
1239 offset += 2 * sizeof(__u16); 1266 offset += 2 * sizeof(__u16);
1240 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset); 1267 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset);
1241 /* do not touch skb anymore */ 1268 /* do not touch skb anymore */
@@ -1358,6 +1385,21 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
1358 * encorage the standby servers to update the connections timeout 1385 * encorage the standby servers to update the connections timeout
1359 */ 1386 */
1360 pkts = atomic_add_return(1, &cp->in_pkts); 1387 pkts = atomic_add_return(1, &cp->in_pkts);
1388 if (af == AF_INET && (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
1389 cp->protocol == IPPROTO_SCTP) {
1390 if ((cp->state == IP_VS_SCTP_S_ESTABLISHED &&
1391 (atomic_read(&cp->in_pkts) %
1392 sysctl_ip_vs_sync_threshold[1]
1393 == sysctl_ip_vs_sync_threshold[0])) ||
1394 (cp->old_state != cp->state &&
1395 ((cp->state == IP_VS_SCTP_S_CLOSED) ||
1396 (cp->state == IP_VS_SCTP_S_SHUT_ACK_CLI) ||
1397 (cp->state == IP_VS_SCTP_S_SHUT_ACK_SER)))) {
1398 ip_vs_sync_conn(cp);
1399 goto out;
1400 }
1401 }
1402
1361 if (af == AF_INET && 1403 if (af == AF_INET &&
1362 (ip_vs_sync_state & IP_VS_STATE_MASTER) && 1404 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
1363 (((cp->protocol != IPPROTO_TCP || 1405 (((cp->protocol != IPPROTO_TCP ||
@@ -1366,9 +1408,11 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
1366 == sysctl_ip_vs_sync_threshold[0])) || 1408 == sysctl_ip_vs_sync_threshold[0])) ||
1367 ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) && 1409 ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
1368 ((cp->state == IP_VS_TCP_S_FIN_WAIT) || 1410 ((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
1411 (cp->state == IP_VS_TCP_S_CLOSE) ||
1369 (cp->state == IP_VS_TCP_S_CLOSE_WAIT) || 1412 (cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
1370 (cp->state == IP_VS_TCP_S_TIME_WAIT))))) 1413 (cp->state == IP_VS_TCP_S_TIME_WAIT)))))
1371 ip_vs_sync_conn(cp); 1414 ip_vs_sync_conn(cp);
1415out:
1372 cp->old_state = cp->state; 1416 cp->old_state = cp->state;
1373 1417
1374 ip_vs_conn_put(cp); 1418 ip_vs_conn_put(cp);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index e55a6861d26f..7ee9c3426f44 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1843,7 +1843,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
1843 if (v == SEQ_START_TOKEN) { 1843 if (v == SEQ_START_TOKEN) {
1844 seq_printf(seq, 1844 seq_printf(seq,
1845 "IP Virtual Server version %d.%d.%d (size=%d)\n", 1845 "IP Virtual Server version %d.%d.%d (size=%d)\n",
1846 NVERSION(IP_VS_VERSION_CODE), IP_VS_CONN_TAB_SIZE); 1846 NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size);
1847 seq_puts(seq, 1847 seq_puts(seq,
1848 "Prot LocalAddress:Port Scheduler Flags\n"); 1848 "Prot LocalAddress:Port Scheduler Flags\n");
1849 seq_puts(seq, 1849 seq_puts(seq,
@@ -2077,6 +2077,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2077 if (!capable(CAP_NET_ADMIN)) 2077 if (!capable(CAP_NET_ADMIN))
2078 return -EPERM; 2078 return -EPERM;
2079 2079
2080 if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_SET_MAX)
2081 return -EINVAL;
2082 if (len < 0 || len > MAX_ARG_LEN)
2083 return -EINVAL;
2080 if (len != set_arglen[SET_CMDID(cmd)]) { 2084 if (len != set_arglen[SET_CMDID(cmd)]) {
2081 pr_err("set_ctl: len %u != %u\n", 2085 pr_err("set_ctl: len %u != %u\n",
2082 len, set_arglen[SET_CMDID(cmd)]); 2086 len, set_arglen[SET_CMDID(cmd)]);
@@ -2128,8 +2132,9 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2128 } 2132 }
2129 } 2133 }
2130 2134
2131 /* Check for valid protocol: TCP or UDP, even for fwmark!=0 */ 2135 /* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */
2132 if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP) { 2136 if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP &&
2137 usvc.protocol != IPPROTO_SCTP) {
2133 pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n", 2138 pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n",
2134 usvc.protocol, &usvc.addr.ip, 2139 usvc.protocol, &usvc.addr.ip,
2135 ntohs(usvc.port), usvc.sched_name); 2140 ntohs(usvc.port), usvc.sched_name);
@@ -2352,17 +2357,25 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2352{ 2357{
2353 unsigned char arg[128]; 2358 unsigned char arg[128];
2354 int ret = 0; 2359 int ret = 0;
2360 unsigned int copylen;
2355 2361
2356 if (!capable(CAP_NET_ADMIN)) 2362 if (!capable(CAP_NET_ADMIN))
2357 return -EPERM; 2363 return -EPERM;
2358 2364
2365 if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX)
2366 return -EINVAL;
2367
2359 if (*len < get_arglen[GET_CMDID(cmd)]) { 2368 if (*len < get_arglen[GET_CMDID(cmd)]) {
2360 pr_err("get_ctl: len %u < %u\n", 2369 pr_err("get_ctl: len %u < %u\n",
2361 *len, get_arglen[GET_CMDID(cmd)]); 2370 *len, get_arglen[GET_CMDID(cmd)]);
2362 return -EINVAL; 2371 return -EINVAL;
2363 } 2372 }
2364 2373
2365 if (copy_from_user(arg, user, get_arglen[GET_CMDID(cmd)]) != 0) 2374 copylen = get_arglen[GET_CMDID(cmd)];
2375 if (copylen > 128)
2376 return -EINVAL;
2377
2378 if (copy_from_user(arg, user, copylen) != 0)
2366 return -EFAULT; 2379 return -EFAULT;
2367 2380
2368 if (mutex_lock_interruptible(&__ip_vs_mutex)) 2381 if (mutex_lock_interruptible(&__ip_vs_mutex))
@@ -2374,7 +2387,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2374 char buf[64]; 2387 char buf[64];
2375 2388
2376 sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)", 2389 sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)",
2377 NVERSION(IP_VS_VERSION_CODE), IP_VS_CONN_TAB_SIZE); 2390 NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size);
2378 if (copy_to_user(user, buf, strlen(buf)+1) != 0) { 2391 if (copy_to_user(user, buf, strlen(buf)+1) != 0) {
2379 ret = -EFAULT; 2392 ret = -EFAULT;
2380 goto out; 2393 goto out;
@@ -2387,7 +2400,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2387 { 2400 {
2388 struct ip_vs_getinfo info; 2401 struct ip_vs_getinfo info;
2389 info.version = IP_VS_VERSION_CODE; 2402 info.version = IP_VS_VERSION_CODE;
2390 info.size = IP_VS_CONN_TAB_SIZE; 2403 info.size = ip_vs_conn_tab_size;
2391 info.num_services = ip_vs_num_services; 2404 info.num_services = ip_vs_num_services;
2392 if (copy_to_user(user, &info, sizeof(info)) != 0) 2405 if (copy_to_user(user, &info, sizeof(info)) != 0)
2393 ret = -EFAULT; 2406 ret = -EFAULT;
@@ -2714,6 +2727,8 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
2714 if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr)))) 2727 if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr))))
2715 return -EINVAL; 2728 return -EINVAL;
2716 2729
2730 memset(usvc, 0, sizeof(*usvc));
2731
2717 usvc->af = nla_get_u16(nla_af); 2732 usvc->af = nla_get_u16(nla_af);
2718#ifdef CONFIG_IP_VS_IPV6 2733#ifdef CONFIG_IP_VS_IPV6
2719 if (usvc->af != AF_INET && usvc->af != AF_INET6) 2734 if (usvc->af != AF_INET && usvc->af != AF_INET6)
@@ -2901,6 +2916,8 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
2901 if (!(nla_addr && nla_port)) 2916 if (!(nla_addr && nla_port))
2902 return -EINVAL; 2917 return -EINVAL;
2903 2918
2919 memset(udest, 0, sizeof(*udest));
2920
2904 nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr)); 2921 nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr));
2905 udest->port = nla_get_u16(nla_port); 2922 udest->port = nla_get_u16(nla_port);
2906 2923
@@ -3227,7 +3244,7 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
3227 case IPVS_CMD_GET_INFO: 3244 case IPVS_CMD_GET_INFO:
3228 NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE); 3245 NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE);
3229 NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE, 3246 NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
3230 IP_VS_CONN_TAB_SIZE); 3247 ip_vs_conn_tab_size);
3231 break; 3248 break;
3232 } 3249 }
3233 3250
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 33e2c799cba7..73f38ea98f25 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -208,7 +208,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
208 */ 208 */
209 from.ip = n_cp->vaddr.ip; 209 from.ip = n_cp->vaddr.ip;
210 port = n_cp->vport; 210 port = n_cp->vport;
211 sprintf(buf, "%d,%d,%d,%d,%d,%d", NIPQUAD(from.ip), 211 sprintf(buf, "%u,%u,%u,%u,%u,%u", NIPQUAD(from.ip),
212 (ntohs(port)>>8)&255, ntohs(port)&255); 212 (ntohs(port)>>8)&255, ntohs(port)&255);
213 buf_len = strlen(buf); 213 buf_len = strlen(buf);
214 214
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index f7476b95ab46..caa58fa1438a 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -45,6 +45,7 @@
45#include <linux/kernel.h> 45#include <linux/kernel.h>
46#include <linux/skbuff.h> 46#include <linux/skbuff.h>
47#include <linux/jiffies.h> 47#include <linux/jiffies.h>
48#include <linux/list.h>
48 49
49/* for sysctl */ 50/* for sysctl */
50#include <linux/fs.h> 51#include <linux/fs.h>
@@ -85,25 +86,25 @@ static int sysctl_ip_vs_lblcr_expiration = 24*60*60*HZ;
85/* 86/*
86 * IPVS destination set structure and operations 87 * IPVS destination set structure and operations
87 */ 88 */
88struct ip_vs_dest_list { 89struct ip_vs_dest_set_elem {
89 struct ip_vs_dest_list *next; /* list link */ 90 struct list_head list; /* list link */
90 struct ip_vs_dest *dest; /* destination server */ 91 struct ip_vs_dest *dest; /* destination server */
91}; 92};
92 93
93struct ip_vs_dest_set { 94struct ip_vs_dest_set {
94 atomic_t size; /* set size */ 95 atomic_t size; /* set size */
95 unsigned long lastmod; /* last modified time */ 96 unsigned long lastmod; /* last modified time */
96 struct ip_vs_dest_list *list; /* destination list */ 97 struct list_head list; /* destination list */
97 rwlock_t lock; /* lock for this list */ 98 rwlock_t lock; /* lock for this list */
98}; 99};
99 100
100 101
101static struct ip_vs_dest_list * 102static struct ip_vs_dest_set_elem *
102ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) 103ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
103{ 104{
104 struct ip_vs_dest_list *e; 105 struct ip_vs_dest_set_elem *e;
105 106
106 for (e=set->list; e!=NULL; e=e->next) { 107 list_for_each_entry(e, &set->list, list) {
107 if (e->dest == dest) 108 if (e->dest == dest)
108 /* already existed */ 109 /* already existed */
109 return NULL; 110 return NULL;
@@ -118,9 +119,7 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
118 atomic_inc(&dest->refcnt); 119 atomic_inc(&dest->refcnt);
119 e->dest = dest; 120 e->dest = dest;
120 121
121 /* link it to the list */ 122 list_add(&e->list, &set->list);
122 e->next = set->list;
123 set->list = e;
124 atomic_inc(&set->size); 123 atomic_inc(&set->size);
125 124
126 set->lastmod = jiffies; 125 set->lastmod = jiffies;
@@ -130,34 +129,33 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
130static void 129static void
131ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) 130ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
132{ 131{
133 struct ip_vs_dest_list *e, **ep; 132 struct ip_vs_dest_set_elem *e;
134 133
135 for (ep=&set->list, e=*ep; e!=NULL; e=*ep) { 134 list_for_each_entry(e, &set->list, list) {
136 if (e->dest == dest) { 135 if (e->dest == dest) {
137 /* HIT */ 136 /* HIT */
138 *ep = e->next;
139 atomic_dec(&set->size); 137 atomic_dec(&set->size);
140 set->lastmod = jiffies; 138 set->lastmod = jiffies;
141 atomic_dec(&e->dest->refcnt); 139 atomic_dec(&e->dest->refcnt);
140 list_del(&e->list);
142 kfree(e); 141 kfree(e);
143 break; 142 break;
144 } 143 }
145 ep = &e->next;
146 } 144 }
147} 145}
148 146
149static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set) 147static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
150{ 148{
151 struct ip_vs_dest_list *e, **ep; 149 struct ip_vs_dest_set_elem *e, *ep;
152 150
153 write_lock(&set->lock); 151 write_lock(&set->lock);
154 for (ep=&set->list, e=*ep; e!=NULL; e=*ep) { 152 list_for_each_entry_safe(e, ep, &set->list, list) {
155 *ep = e->next;
156 /* 153 /*
157 * We don't kfree dest because it is refered either 154 * We don't kfree dest because it is refered either
158 * by its service or by the trash dest list. 155 * by its service or by the trash dest list.
159 */ 156 */
160 atomic_dec(&e->dest->refcnt); 157 atomic_dec(&e->dest->refcnt);
158 list_del(&e->list);
161 kfree(e); 159 kfree(e);
162 } 160 }
163 write_unlock(&set->lock); 161 write_unlock(&set->lock);
@@ -166,7 +164,7 @@ static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
166/* get weighted least-connection node in the destination set */ 164/* get weighted least-connection node in the destination set */
167static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set) 165static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
168{ 166{
169 register struct ip_vs_dest_list *e; 167 register struct ip_vs_dest_set_elem *e;
170 struct ip_vs_dest *dest, *least; 168 struct ip_vs_dest *dest, *least;
171 int loh, doh; 169 int loh, doh;
172 170
@@ -174,7 +172,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
174 return NULL; 172 return NULL;
175 173
176 /* select the first destination server, whose weight > 0 */ 174 /* select the first destination server, whose weight > 0 */
177 for (e=set->list; e!=NULL; e=e->next) { 175 list_for_each_entry(e, &set->list, list) {
178 least = e->dest; 176 least = e->dest;
179 if (least->flags & IP_VS_DEST_F_OVERLOAD) 177 if (least->flags & IP_VS_DEST_F_OVERLOAD)
180 continue; 178 continue;
@@ -190,7 +188,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
190 188
191 /* find the destination with the weighted least load */ 189 /* find the destination with the weighted least load */
192 nextstage: 190 nextstage:
193 for (e=e->next; e!=NULL; e=e->next) { 191 list_for_each_entry(e, &set->list, list) {
194 dest = e->dest; 192 dest = e->dest;
195 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 193 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
196 continue; 194 continue;
@@ -220,7 +218,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
220/* get weighted most-connection node in the destination set */ 218/* get weighted most-connection node in the destination set */
221static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set) 219static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
222{ 220{
223 register struct ip_vs_dest_list *e; 221 register struct ip_vs_dest_set_elem *e;
224 struct ip_vs_dest *dest, *most; 222 struct ip_vs_dest *dest, *most;
225 int moh, doh; 223 int moh, doh;
226 224
@@ -228,7 +226,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
228 return NULL; 226 return NULL;
229 227
230 /* select the first destination server, whose weight > 0 */ 228 /* select the first destination server, whose weight > 0 */
231 for (e=set->list; e!=NULL; e=e->next) { 229 list_for_each_entry(e, &set->list, list) {
232 most = e->dest; 230 most = e->dest;
233 if (atomic_read(&most->weight) > 0) { 231 if (atomic_read(&most->weight) > 0) {
234 moh = atomic_read(&most->activeconns) * 50 232 moh = atomic_read(&most->activeconns) * 50
@@ -240,7 +238,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
240 238
241 /* find the destination with the weighted most load */ 239 /* find the destination with the weighted most load */
242 nextstage: 240 nextstage:
243 for (e=e->next; e!=NULL; e=e->next) { 241 list_for_each_entry(e, &set->list, list) {
244 dest = e->dest; 242 dest = e->dest;
245 doh = atomic_read(&dest->activeconns) * 50 243 doh = atomic_read(&dest->activeconns) * 50
246 + atomic_read(&dest->inactconns); 244 + atomic_read(&dest->inactconns);
@@ -389,7 +387,7 @@ ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
389 387
390 /* initilize its dest set */ 388 /* initilize its dest set */
391 atomic_set(&(en->set.size), 0); 389 atomic_set(&(en->set.size), 0);
392 en->set.list = NULL; 390 INIT_LIST_HEAD(&en->set.list);
393 rwlock_init(&en->set.lock); 391 rwlock_init(&en->set.lock);
394 392
395 ip_vs_lblcr_hash(tbl, en); 393 ip_vs_lblcr_hash(tbl, en);
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index 3e7671674549..0e584553819d 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -257,6 +257,9 @@ int __init ip_vs_protocol_init(void)
257#ifdef CONFIG_IP_VS_PROTO_UDP 257#ifdef CONFIG_IP_VS_PROTO_UDP
258 REGISTER_PROTOCOL(&ip_vs_protocol_udp); 258 REGISTER_PROTOCOL(&ip_vs_protocol_udp);
259#endif 259#endif
260#ifdef CONFIG_IP_VS_PROTO_SCTP
261 REGISTER_PROTOCOL(&ip_vs_protocol_sctp);
262#endif
260#ifdef CONFIG_IP_VS_PROTO_AH 263#ifdef CONFIG_IP_VS_PROTO_AH
261 REGISTER_PROTOCOL(&ip_vs_protocol_ah); 264 REGISTER_PROTOCOL(&ip_vs_protocol_ah);
262#endif 265#endif
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
new file mode 100644
index 000000000000..c9a3f7a21d53
--- /dev/null
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -0,0 +1,1183 @@
1#include <linux/kernel.h>
2#include <linux/ip.h>
3#include <linux/sctp.h>
4#include <net/ip.h>
5#include <net/ip6_checksum.h>
6#include <linux/netfilter.h>
7#include <linux/netfilter_ipv4.h>
8#include <net/sctp/checksum.h>
9#include <net/ip_vs.h>
10
11
12static struct ip_vs_conn *
13sctp_conn_in_get(int af,
14 const struct sk_buff *skb,
15 struct ip_vs_protocol *pp,
16 const struct ip_vs_iphdr *iph,
17 unsigned int proto_off,
18 int inverse)
19{
20 __be16 _ports[2], *pptr;
21
22 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
23 if (pptr == NULL)
24 return NULL;
25
26 if (likely(!inverse))
27 return ip_vs_conn_in_get(af, iph->protocol,
28 &iph->saddr, pptr[0],
29 &iph->daddr, pptr[1]);
30 else
31 return ip_vs_conn_in_get(af, iph->protocol,
32 &iph->daddr, pptr[1],
33 &iph->saddr, pptr[0]);
34}
35
36static struct ip_vs_conn *
37sctp_conn_out_get(int af,
38 const struct sk_buff *skb,
39 struct ip_vs_protocol *pp,
40 const struct ip_vs_iphdr *iph,
41 unsigned int proto_off,
42 int inverse)
43{
44 __be16 _ports[2], *pptr;
45
46 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
47 if (pptr == NULL)
48 return NULL;
49
50 if (likely(!inverse))
51 return ip_vs_conn_out_get(af, iph->protocol,
52 &iph->saddr, pptr[0],
53 &iph->daddr, pptr[1]);
54 else
55 return ip_vs_conn_out_get(af, iph->protocol,
56 &iph->daddr, pptr[1],
57 &iph->saddr, pptr[0]);
58}
59
60static int
61sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
62 int *verdict, struct ip_vs_conn **cpp)
63{
64 struct ip_vs_service *svc;
65 sctp_chunkhdr_t _schunkh, *sch;
66 sctp_sctphdr_t *sh, _sctph;
67 struct ip_vs_iphdr iph;
68
69 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
70
71 sh = skb_header_pointer(skb, iph.len, sizeof(_sctph), &_sctph);
72 if (sh == NULL)
73 return 0;
74
75 sch = skb_header_pointer(skb, iph.len + sizeof(sctp_sctphdr_t),
76 sizeof(_schunkh), &_schunkh);
77 if (sch == NULL)
78 return 0;
79
80 if ((sch->type == SCTP_CID_INIT) &&
81 (svc = ip_vs_service_get(af, skb->mark, iph.protocol,
82 &iph.daddr, sh->dest))) {
83 if (ip_vs_todrop()) {
84 /*
85 * It seems that we are very loaded.
86 * We have to drop this packet :(
87 */
88 ip_vs_service_put(svc);
89 *verdict = NF_DROP;
90 return 0;
91 }
92 /*
93 * Let the virtual server select a real server for the
94 * incoming connection, and create a connection entry.
95 */
96 *cpp = ip_vs_schedule(svc, skb);
97 if (!*cpp) {
98 *verdict = ip_vs_leave(svc, skb, pp);
99 return 0;
100 }
101 ip_vs_service_put(svc);
102 }
103
104 return 1;
105}
106
107static int
108sctp_snat_handler(struct sk_buff *skb,
109 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
110{
111 sctp_sctphdr_t *sctph;
112 unsigned int sctphoff;
113 __be32 crc32;
114
115#ifdef CONFIG_IP_VS_IPV6
116 if (cp->af == AF_INET6)
117 sctphoff = sizeof(struct ipv6hdr);
118 else
119#endif
120 sctphoff = ip_hdrlen(skb);
121
122 /* csum_check requires unshared skb */
123 if (!skb_make_writable(skb, sctphoff + sizeof(*sctph)))
124 return 0;
125
126 if (unlikely(cp->app != NULL)) {
127 /* Some checks before mangling */
128 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
129 return 0;
130
131 /* Call application helper if needed */
132 if (!ip_vs_app_pkt_out(cp, skb))
133 return 0;
134 }
135
136 sctph = (void *) skb_network_header(skb) + sctphoff;
137 sctph->source = cp->vport;
138
139 /* Calculate the checksum */
140 crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
141 for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next)
142 crc32 = sctp_update_cksum((u8 *) skb->data, skb_headlen(skb),
143 crc32);
144 crc32 = sctp_end_cksum(crc32);
145 sctph->checksum = crc32;
146
147 return 1;
148}
149
150static int
151sctp_dnat_handler(struct sk_buff *skb,
152 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
153{
154
155 sctp_sctphdr_t *sctph;
156 unsigned int sctphoff;
157 __be32 crc32;
158
159#ifdef CONFIG_IP_VS_IPV6
160 if (cp->af == AF_INET6)
161 sctphoff = sizeof(struct ipv6hdr);
162 else
163#endif
164 sctphoff = ip_hdrlen(skb);
165
166 /* csum_check requires unshared skb */
167 if (!skb_make_writable(skb, sctphoff + sizeof(*sctph)))
168 return 0;
169
170 if (unlikely(cp->app != NULL)) {
171 /* Some checks before mangling */
172 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
173 return 0;
174
175 /* Call application helper if needed */
176 if (!ip_vs_app_pkt_out(cp, skb))
177 return 0;
178 }
179
180 sctph = (void *) skb_network_header(skb) + sctphoff;
181 sctph->dest = cp->dport;
182
183 /* Calculate the checksum */
184 crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
185 for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next)
186 crc32 = sctp_update_cksum((u8 *) skb->data, skb_headlen(skb),
187 crc32);
188 crc32 = sctp_end_cksum(crc32);
189 sctph->checksum = crc32;
190
191 return 1;
192}
193
194static int
195sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
196{
197 struct sk_buff *list = skb_shinfo(skb)->frag_list;
198 unsigned int sctphoff;
199 struct sctphdr *sh, _sctph;
200 __le32 cmp;
201 __le32 val;
202 __u32 tmp;
203
204#ifdef CONFIG_IP_VS_IPV6
205 if (af == AF_INET6)
206 sctphoff = sizeof(struct ipv6hdr);
207 else
208#endif
209 sctphoff = ip_hdrlen(skb);
210
211 sh = skb_header_pointer(skb, sctphoff, sizeof(_sctph), &_sctph);
212 if (sh == NULL)
213 return 0;
214
215 cmp = sh->checksum;
216
217 tmp = sctp_start_cksum((__u8 *) sh, skb_headlen(skb));
218 for (; list; list = list->next)
219 tmp = sctp_update_cksum((__u8 *) list->data,
220 skb_headlen(list), tmp);
221
222 val = sctp_end_cksum(tmp);
223
224 if (val != cmp) {
225 /* CRC failure, dump it. */
226 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
227 "Failed checksum for");
228 return 0;
229 }
230 return 1;
231}
232
233struct ipvs_sctp_nextstate {
234 int next_state;
235};
236enum ipvs_sctp_event_t {
237 IP_VS_SCTP_EVE_DATA_CLI,
238 IP_VS_SCTP_EVE_DATA_SER,
239 IP_VS_SCTP_EVE_INIT_CLI,
240 IP_VS_SCTP_EVE_INIT_SER,
241 IP_VS_SCTP_EVE_INIT_ACK_CLI,
242 IP_VS_SCTP_EVE_INIT_ACK_SER,
243 IP_VS_SCTP_EVE_COOKIE_ECHO_CLI,
244 IP_VS_SCTP_EVE_COOKIE_ECHO_SER,
245 IP_VS_SCTP_EVE_COOKIE_ACK_CLI,
246 IP_VS_SCTP_EVE_COOKIE_ACK_SER,
247 IP_VS_SCTP_EVE_ABORT_CLI,
248 IP_VS_SCTP_EVE__ABORT_SER,
249 IP_VS_SCTP_EVE_SHUT_CLI,
250 IP_VS_SCTP_EVE_SHUT_SER,
251 IP_VS_SCTP_EVE_SHUT_ACK_CLI,
252 IP_VS_SCTP_EVE_SHUT_ACK_SER,
253 IP_VS_SCTP_EVE_SHUT_COM_CLI,
254 IP_VS_SCTP_EVE_SHUT_COM_SER,
255 IP_VS_SCTP_EVE_LAST
256};
257
258static enum ipvs_sctp_event_t sctp_events[255] = {
259 IP_VS_SCTP_EVE_DATA_CLI,
260 IP_VS_SCTP_EVE_INIT_CLI,
261 IP_VS_SCTP_EVE_INIT_ACK_CLI,
262 IP_VS_SCTP_EVE_DATA_CLI,
263 IP_VS_SCTP_EVE_DATA_CLI,
264 IP_VS_SCTP_EVE_DATA_CLI,
265 IP_VS_SCTP_EVE_ABORT_CLI,
266 IP_VS_SCTP_EVE_SHUT_CLI,
267 IP_VS_SCTP_EVE_SHUT_ACK_CLI,
268 IP_VS_SCTP_EVE_DATA_CLI,
269 IP_VS_SCTP_EVE_COOKIE_ECHO_CLI,
270 IP_VS_SCTP_EVE_COOKIE_ACK_CLI,
271 IP_VS_SCTP_EVE_DATA_CLI,
272 IP_VS_SCTP_EVE_DATA_CLI,
273 IP_VS_SCTP_EVE_SHUT_COM_CLI,
274};
275
276static struct ipvs_sctp_nextstate
277 sctp_states_table[IP_VS_SCTP_S_LAST][IP_VS_SCTP_EVE_LAST] = {
278 /*
279 * STATE : IP_VS_SCTP_S_NONE
280 */
281 /*next state *//*event */
282 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
283 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
284 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
285 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
286 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
287 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
288 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
289 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
290 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
291 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
292 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
293 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
294 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
295 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
296 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
297 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
298 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
299 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ },
300 },
301 /*
302 * STATE : IP_VS_SCTP_S_INIT_CLI
303 * Cient sent INIT and is waiting for reply from server(In ECHO_WAIT)
304 */
305 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
306 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
307 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
308 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
309 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
310 {IP_VS_SCTP_S_INIT_ACK_SER /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
311 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ECHO_CLI */ },
312 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_ECHO_SER */ },
313 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
314 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
315 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
316 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
317 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
318 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
319 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
320 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
321 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
322 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
323 },
324 /*
325 * State : IP_VS_SCTP_S_INIT_SER
326 * Server sent INIT and waiting for INIT ACK from the client
327 */
328 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
329 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
330 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
331 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
332 {IP_VS_SCTP_S_INIT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
333 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
334 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
335 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
336 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
337 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
338 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
339 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
340 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
341 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
342 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
343 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
344 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
345 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
346 },
347 /*
348 * State : IP_VS_SCTP_S_INIT_ACK_CLI
349 * Client sent INIT ACK and waiting for ECHO from the server
350 */
351 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
352 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
353 /*
354 * We have got an INIT from client. From the spec.“Upon receipt of
355 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
356 * an INIT ACK using the same parameters it sent in its original
357 * INIT chunk (including its Initiate Tag, unchanged”).
358 */
359 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
360 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
361 /*
362 * INIT_ACK has been resent by the client, let us stay is in
363 * the same state
364 */
365 {IP_VS_SCTP_S_INIT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
366 /*
367 * INIT_ACK sent by the server, close the connection
368 */
369 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
370 /*
371 * ECHO by client, it should not happen, close the connection
372 */
373 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
374 /*
375 * ECHO by server, this is what we are expecting, move to ECHO_SER
376 */
377 {IP_VS_SCTP_S_ECHO_SER /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
378 /*
379 * COOKIE ACK from client, it should not happen, close the connection
380 */
381 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
382 /*
383 * Unexpected COOKIE ACK from server, staty in the same state
384 */
385 {IP_VS_SCTP_S_INIT_ACK_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
386 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
387 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
388 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
389 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
390 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
391 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
392 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
393 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
394 },
395 /*
396 * State : IP_VS_SCTP_S_INIT_ACK_SER
397 * Server sent INIT ACK and waiting for ECHO from the client
398 */
399 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
400 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
401 /*
402 * We have got an INIT from client. From the spec.“Upon receipt of
403 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
404 * an INIT ACK using the same parameters it sent in its original
405 * INIT chunk (including its Initiate Tag, unchanged”).
406 */
407 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
408 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
409 /*
410 * Unexpected INIT_ACK by the client, let us close the connection
411 */
412 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
413 /*
414 * INIT_ACK resent by the server, let us move to same state
415 */
416 {IP_VS_SCTP_S_INIT_ACK_SER /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
417 /*
418 * Client send the ECHO, this is what we are expecting,
419 * move to ECHO_CLI
420 */
421 {IP_VS_SCTP_S_ECHO_CLI /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
422 /*
423 * ECHO received from the server, Not sure what to do,
424 * let us close it
425 */
426 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
427 /*
428 * COOKIE ACK from client, let us stay in the same state
429 */
430 {IP_VS_SCTP_S_INIT_ACK_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
431 /*
432 * COOKIE ACK from server, hmm... this should not happen, lets close
433 * the connection.
434 */
435 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
436 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
437 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
438 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
439 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
440 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
441 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
442 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
443 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
444 },
445 /*
446 * State : IP_VS_SCTP_S_ECHO_CLI
447 * Cient sent ECHO and waiting COOKEI ACK from the Server
448 */
449 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
450 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
451 /*
452 * We have got an INIT from client. From the spec.“Upon receipt of
453 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
454 * an INIT ACK using the same parameters it sent in its original
455 * INIT chunk (including its Initiate Tag, unchanged”).
456 */
457 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
458 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
459 /*
460 * INIT_ACK has been by the client, let us close the connection
461 */
462 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
463 /*
464 * INIT_ACK sent by the server, Unexpected INIT ACK, spec says,
465 * “If an INIT ACK is received by an endpoint in any state other
466 * than the COOKIE-WAIT state, the endpoint should discard the
467 * INIT ACK chunk”. Stay in the same state
468 */
469 {IP_VS_SCTP_S_ECHO_CLI /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
470 /*
471 * Client resent the ECHO, let us stay in the same state
472 */
473 {IP_VS_SCTP_S_ECHO_CLI /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
474 /*
475 * ECHO received from the server, Not sure what to do,
476 * let us close it
477 */
478 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
479 /*
480 * COOKIE ACK from client, this shoud not happen, let's close the
481 * connection
482 */
483 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
484 /*
485 * COOKIE ACK from server, this is what we are awaiting,lets move to
486 * ESTABLISHED.
487 */
488 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
489 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
490 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
491 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
492 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
493 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
494 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
495 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
496 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
497 },
498 /*
499 * State : IP_VS_SCTP_S_ECHO_SER
500 * Server sent ECHO and waiting COOKEI ACK from the client
501 */
502 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
503 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
504 /*
505 * We have got an INIT from client. From the spec.“Upon receipt of
506 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
507 * an INIT ACK using the same parameters it sent in its original
508 * INIT chunk (including its Initiate Tag, unchanged”).
509 */
510 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
511 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
512 /*
513 * INIT_ACK sent by the server, Unexpected INIT ACK, spec says,
514 * “If an INIT ACK is received by an endpoint in any state other
515 * than the COOKIE-WAIT state, the endpoint should discard the
516 * INIT ACK chunk”. Stay in the same state
517 */
518 {IP_VS_SCTP_S_ECHO_SER /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
519 /*
520 * INIT_ACK has been by the server, let us close the connection
521 */
522 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
523 /*
524 * Client sent the ECHO, not sure what to do, let's close the
525 * connection.
526 */
527 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
528 /*
529 * ECHO resent by the server, stay in the same state
530 */
531 {IP_VS_SCTP_S_ECHO_SER /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
532 /*
533 * COOKIE ACK from client, this is what we are expecting, let's move
534 * to ESTABLISHED.
535 */
536 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
537 /*
538 * COOKIE ACK from server, this should not happen, lets close the
539 * connection.
540 */
541 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
542 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
543 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
544 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
545 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
546 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
547 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
548 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
549 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
550 },
551 /*
552 * State : IP_VS_SCTP_S_ESTABLISHED
553 * Association established
554 */
555 {{IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_DATA_CLI */ },
556 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_DATA_SER */ },
557 /*
558 * We have got an INIT from client. From the spec.“Upon receipt of
559 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
560 * an INIT ACK using the same parameters it sent in its original
561 * INIT chunk (including its Initiate Tag, unchanged”).
562 */
563 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
564 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
565 /*
566 * INIT_ACK sent by the server, Unexpected INIT ACK, spec says,
567 * “If an INIT ACK is received by an endpoint in any state other
568 * than the COOKIE-WAIT state, the endpoint should discard the
569 * INIT ACK chunk”. Stay in the same state
570 */
571 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
572 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
573 /*
574 * Client sent ECHO, Spec(sec 5.2.4) says it may be handled by the
575 * peer and peer shall move to the ESTABISHED. if it doesn't handle
576 * it will send ERROR chunk. So, stay in the same state
577 */
578 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
579 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
580 /*
581 * COOKIE ACK from client, not sure what to do stay in the same state
582 */
583 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
584 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
585 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
586 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
587 /*
588 * SHUTDOWN from the client, move to SHUDDOWN_CLI
589 */
590 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ },
591 /*
592 * SHUTDOWN from the server, move to SHUTDOWN_SER
593 */
594 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ },
595 /*
596 * client sent SHUDTDOWN_ACK, this should not happen, let's close
597 * the connection
598 */
599 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
600 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
601 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
602 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
603 },
604 /*
605 * State : IP_VS_SCTP_S_SHUT_CLI
606 * SHUTDOWN sent from the client, waitinf for SHUT ACK from the server
607 */
608 /*
609 * We recieved the data chuck, keep the state unchanged. I assume
610 * that still data chuncks can be received by both the peers in
611 * SHUDOWN state
612 */
613
614 {{IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_DATA_CLI */ },
615 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_DATA_SER */ },
616 /*
617 * We have got an INIT from client. From the spec.“Upon receipt of
618 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
619 * an INIT ACK using the same parameters it sent in its original
620 * INIT chunk (including its Initiate Tag, unchanged”).
621 */
622 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
623 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
624 /*
625 * INIT_ACK sent by the server, Unexpected INIT ACK, spec says,
626 * “If an INIT ACK is received by an endpoint in any state other
627 * than the COOKIE-WAIT state, the endpoint should discard the
628 * INIT ACK chunk”. Stay in the same state
629 */
630 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
631 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
632 /*
633 * Client sent ECHO, Spec(sec 5.2.4) says it may be handled by the
634 * peer and peer shall move to the ESTABISHED. if it doesn't handle
635 * it will send ERROR chunk. So, stay in the same state
636 */
637 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
638 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
639 /*
640 * COOKIE ACK from client, not sure what to do stay in the same state
641 */
642 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
643 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
644 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
645 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
646 /*
647 * SHUTDOWN resent from the client, move to SHUDDOWN_CLI
648 */
649 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ },
650 /*
651 * SHUTDOWN from the server, move to SHUTDOWN_SER
652 */
653 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ },
654 /*
655 * client sent SHUDTDOWN_ACK, this should not happen, let's close
656 * the connection
657 */
658 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
659 /*
660 * Server sent SHUTDOWN ACK, this is what we are expecting, let's move
661 * to SHUDOWN_ACK_SER
662 */
663 {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
664 /*
665 * SHUTDOWN COM from client, this should not happen, let's close the
666 * connection
667 */
668 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
669 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
670 },
671 /*
672 * State : IP_VS_SCTP_S_SHUT_SER
673 * SHUTDOWN sent from the server, waitinf for SHUTDOWN ACK from client
674 */
675 /*
676 * We recieved the data chuck, keep the state unchanged. I assume
677 * that still data chuncks can be received by both the peers in
678 * SHUDOWN state
679 */
680
681 {{IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_DATA_CLI */ },
682 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_DATA_SER */ },
683 /*
684 * We have got an INIT from client. From the spec.“Upon receipt of
685 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
686 * an INIT ACK using the same parameters it sent in its original
687 * INIT chunk (including its Initiate Tag, unchanged”).
688 */
689 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
690 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
691 /*
692 * INIT_ACK sent by the server, Unexpected INIT ACK, spec says,
693 * “If an INIT ACK is received by an endpoint in any state other
694 * than the COOKIE-WAIT state, the endpoint should discard the
695 * INIT ACK chunk”. Stay in the same state
696 */
697 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
698 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
699 /*
700 * Client sent ECHO, Spec(sec 5.2.4) says it may be handled by the
701 * peer and peer shall move to the ESTABISHED. if it doesn't handle
702 * it will send ERROR chunk. So, stay in the same state
703 */
704 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
705 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
706 /*
707 * COOKIE ACK from client, not sure what to do stay in the same state
708 */
709 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
710 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
711 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
712 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
713 /*
714 * SHUTDOWN resent from the client, move to SHUDDOWN_CLI
715 */
716 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ },
717 /*
718 * SHUTDOWN resent from the server, move to SHUTDOWN_SER
719 */
720 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ },
721 /*
722 * client sent SHUDTDOWN_ACK, this is what we are expecting, let's
723 * move to SHUT_ACK_CLI
724 */
725 {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
726 /*
727 * Server sent SHUTDOWN ACK, this should not happen, let's close the
728 * connection
729 */
730 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
731 /*
732 * SHUTDOWN COM from client, this should not happen, let's close the
733 * connection
734 */
735 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
736 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
737 },
738
739 /*
740 * State : IP_VS_SCTP_S_SHUT_ACK_CLI
741 * SHUTDOWN ACK from the client, awaiting for SHUTDOWN COM from server
742 */
743 /*
744 * We recieved the data chuck, keep the state unchanged. I assume
745 * that still data chuncks can be received by both the peers in
746 * SHUDOWN state
747 */
748
749 {{IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_DATA_CLI */ },
750 {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_DATA_SER */ },
751 /*
752 * We have got an INIT from client. From the spec.“Upon receipt of
753 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
754 * an INIT ACK using the same parameters it sent in its original
755 * INIT chunk (including its Initiate Tag, unchanged”).
756 */
757 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
758 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
759 /*
760 * INIT_ACK sent by the server, Unexpected INIT ACK, spec says,
761 * “If an INIT ACK is received by an endpoint in any state other
762 * than the COOKIE-WAIT state, the endpoint should discard the
763 * INIT ACK chunk”. Stay in the same state
764 */
765 {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
766 {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
767 /*
768 * Client sent ECHO, Spec(sec 5.2.4) says it may be handled by the
769 * peer and peer shall move to the ESTABISHED. if it doesn't handle
770 * it will send ERROR chunk. So, stay in the same state
771 */
772 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
773 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
774 /*
775 * COOKIE ACK from client, not sure what to do stay in the same state
776 */
777 {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
778 {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
779 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
780 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
781 /*
782 * SHUTDOWN sent from the client, move to SHUDDOWN_CLI
783 */
784 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ },
785 /*
786 * SHUTDOWN sent from the server, move to SHUTDOWN_SER
787 */
788 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ },
789 /*
790 * client resent SHUDTDOWN_ACK, let's stay in the same state
791 */
792 {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
793 /*
794 * Server sent SHUTDOWN ACK, this should not happen, let's close the
795 * connection
796 */
797 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
798 /*
799 * SHUTDOWN COM from client, this should not happen, let's close the
800 * connection
801 */
802 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
803 /*
804 * SHUTDOWN COMPLETE from server this is what we are expecting.
805 */
806 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
807 },
808
809 /*
810 * State : IP_VS_SCTP_S_SHUT_ACK_SER
811 * SHUTDOWN ACK from the server, awaiting for SHUTDOWN COM from client
812 */
813 /*
814 * We recieved the data chuck, keep the state unchanged. I assume
815 * that still data chuncks can be received by both the peers in
816 * SHUDOWN state
817 */
818
819 {{IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_DATA_CLI */ },
820 {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_DATA_SER */ },
821 /*
822 * We have got an INIT from client. From the spec.“Upon receipt of
823 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
824 * an INIT ACK using the same parameters it sent in its original
825 * INIT chunk (including its Initiate Tag, unchanged”).
826 */
827 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
828 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
829 /*
830 * INIT_ACK sent by the server, Unexpected INIT ACK, spec says,
831 * “If an INIT ACK is received by an endpoint in any state other
832 * than the COOKIE-WAIT state, the endpoint should discard the
833 * INIT ACK chunk”. Stay in the same state
834 */
835 {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
836 {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
837 /*
838 * Client sent ECHO, Spec(sec 5.2.4) says it may be handled by the
839 * peer and peer shall move to the ESTABISHED. if it doesn't handle
840 * it will send ERROR chunk. So, stay in the same state
841 */
842 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
843 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
844 /*
845 * COOKIE ACK from client, not sure what to do stay in the same state
846 */
847 {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
848 {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
849 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
850 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
851 /*
852 * SHUTDOWN sent from the client, move to SHUDDOWN_CLI
853 */
854 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ },
855 /*
856 * SHUTDOWN sent from the server, move to SHUTDOWN_SER
857 */
858 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ },
859 /*
860 * client sent SHUDTDOWN_ACK, this should not happen let's close
861 * the connection.
862 */
863 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
864 /*
865 * Server resent SHUTDOWN ACK, stay in the same state
866 */
867 {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
868 /*
869 * SHUTDOWN COM from client, this what we are expecting, let's close
870 * the connection
871 */
872 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
873 /*
874 * SHUTDOWN COMPLETE from server this should not happen.
875 */
876 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
877 },
878 /*
879 * State : IP_VS_SCTP_S_CLOSED
880 */
881 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
882 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
883 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
884 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
885 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
886 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
887 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
888 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
889 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
890 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
891 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
892 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
893 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
894 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
895 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
896 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
897 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
898 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
899 }
900};
901
902/*
903 * Timeout table[state]
904 */
/*
 * Default per-state connection timeouts, in jiffies, indexed by
 * IP_VS_SCTP_S_* state.  Installed as pp->timeout_table by
 * ip_vs_sctp_init() and re-armed on every state transition in
 * set_sctp_state().  Established associations get the longest timer;
 * transient handshake/shutdown states expire after one minute.
 */
static int sctp_timeouts[IP_VS_SCTP_S_LAST + 1] = {
	[IP_VS_SCTP_S_NONE]         =     2 * HZ,
	[IP_VS_SCTP_S_INIT_CLI]     =     1 * 60 * HZ,
	[IP_VS_SCTP_S_INIT_SER]     =     1 * 60 * HZ,
	[IP_VS_SCTP_S_INIT_ACK_CLI] =     1 * 60 * HZ,
	[IP_VS_SCTP_S_INIT_ACK_SER] =     1 * 60 * HZ,
	[IP_VS_SCTP_S_ECHO_CLI]     =     1 * 60 * HZ,
	[IP_VS_SCTP_S_ECHO_SER]     =     1 * 60 * HZ,
	[IP_VS_SCTP_S_ESTABLISHED]  =    15 * 60 * HZ,
	[IP_VS_SCTP_S_SHUT_CLI]     =     1 * 60 * HZ,
	[IP_VS_SCTP_S_SHUT_SER]     =     1 * 60 * HZ,
	[IP_VS_SCTP_S_SHUT_ACK_CLI] =     1 * 60 * HZ,
	[IP_VS_SCTP_S_SHUT_ACK_SER] =     1 * 60 * HZ,
	[IP_VS_SCTP_S_CLOSED]       =    10 * HZ,
	[IP_VS_SCTP_S_LAST]         =     2 * HZ,
};
921
/*
 * Printable names for each IP_VS_SCTP_S_* state.  Used by
 * sctp_state_name() for debug output and by sctp_set_state_timeout()
 * to match state names coming from user space.
 *
 * NOTE(review): "ESTABISHED" is misspelled, but this string is
 * exposed outside the kernel (state listings / timeout configuration
 * by name), so it is deliberately left unchanged here — confirm
 * against user-space tooling before correcting it.
 */
static const char *sctp_state_name_table[IP_VS_SCTP_S_LAST + 1] = {
	[IP_VS_SCTP_S_NONE]         =    "NONE",
	[IP_VS_SCTP_S_INIT_CLI]     =    "INIT_CLI",
	[IP_VS_SCTP_S_INIT_SER]     =    "INIT_SER",
	[IP_VS_SCTP_S_INIT_ACK_CLI] =    "INIT_ACK_CLI",
	[IP_VS_SCTP_S_INIT_ACK_SER] =    "INIT_ACK_SER",
	[IP_VS_SCTP_S_ECHO_CLI]     =    "COOKIE_ECHO_CLI",
	[IP_VS_SCTP_S_ECHO_SER]     =    "COOKIE_ECHO_SER",
	[IP_VS_SCTP_S_ESTABLISHED]  =    "ESTABISHED",
	[IP_VS_SCTP_S_SHUT_CLI]     =    "SHUTDOWN_CLI",
	[IP_VS_SCTP_S_SHUT_SER]     =    "SHUTDOWN_SER",
	[IP_VS_SCTP_S_SHUT_ACK_CLI] =    "SHUTDOWN_ACK_CLI",
	[IP_VS_SCTP_S_SHUT_ACK_SER] =    "SHUTDOWN_ACK_SER",
	[IP_VS_SCTP_S_CLOSED]       =    "CLOSED",
	[IP_VS_SCTP_S_LAST]         =    "BUG!"
};
938
939
940static const char *sctp_state_name(int state)
941{
942 if (state >= IP_VS_SCTP_S_LAST)
943 return "ERR!";
944 if (sctp_state_name_table[state])
945 return sctp_state_name_table[state];
946 return "?";
947}
948
/*
 * ip_vs_protocol .timeout_change hook.  Intentionally a no-op for
 * SCTP: the entries in sctp_timeouts are not adjusted when timeout
 * flags change, but the callback is still wired up in
 * ip_vs_protocol_sctp below.
 */
static void sctp_timeout_change(struct ip_vs_protocol *pp, int flags)
{
}
952
953static int
954sctp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
955{
956
957return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_SCTP_S_LAST,
958 sctp_state_name_table, sname, to);
959}
960
961static inline int
962set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
963 int direction, const struct sk_buff *skb)
964{
965 sctp_chunkhdr_t _sctpch, *sch;
966 unsigned char chunk_type;
967 int event, next_state;
968 int ihl;
969
970#ifdef CONFIG_IP_VS_IPV6
971 ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);
972#else
973 ihl = ip_hdrlen(skb);
974#endif
975
976 sch = skb_header_pointer(skb, ihl + sizeof(sctp_sctphdr_t),
977 sizeof(_sctpch), &_sctpch);
978 if (sch == NULL)
979 return 0;
980
981 chunk_type = sch->type;
982 /*
983 * Section 3: Multiple chunks can be bundled into one SCTP packet
984 * up to the MTU size, except for the INIT, INIT ACK, and
985 * SHUTDOWN COMPLETE chunks. These chunks MUST NOT be bundled with
986 * any other chunk in a packet.
987 *
988 * Section 3.3.7: DATA chunks MUST NOT be bundled with ABORT. Control
989 * chunks (except for INIT, INIT ACK, and SHUTDOWN COMPLETE) MAY be
990 * bundled with an ABORT, but they MUST be placed before the ABORT
991 * in the SCTP packet or they will be ignored by the receiver.
992 */
993 if ((sch->type == SCTP_CID_COOKIE_ECHO) ||
994 (sch->type == SCTP_CID_COOKIE_ACK)) {
995 sch = skb_header_pointer(skb, (ihl + sizeof(sctp_sctphdr_t) +
996 sch->length), sizeof(_sctpch), &_sctpch);
997 if (sch) {
998 if (sch->type == SCTP_CID_ABORT)
999 chunk_type = sch->type;
1000 }
1001 }
1002
1003 event = sctp_events[chunk_type];
1004
1005 /*
1006 * If the direction is IP_VS_DIR_OUTPUT, this event is from server
1007 */
1008 if (direction == IP_VS_DIR_OUTPUT)
1009 event++;
1010 /*
1011 * get next state
1012 */
1013 next_state = sctp_states_table[cp->state][event].next_state;
1014
1015 if (next_state != cp->state) {
1016 struct ip_vs_dest *dest = cp->dest;
1017
1018 IP_VS_DBG_BUF(8, "%s %s %s:%d->"
1019 "%s:%d state: %s->%s conn->refcnt:%d\n",
1020 pp->name,
1021 ((direction == IP_VS_DIR_OUTPUT) ?
1022 "output " : "input "),
1023 IP_VS_DBG_ADDR(cp->af, &cp->daddr),
1024 ntohs(cp->dport),
1025 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
1026 ntohs(cp->cport),
1027 sctp_state_name(cp->state),
1028 sctp_state_name(next_state),
1029 atomic_read(&cp->refcnt));
1030 if (dest) {
1031 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
1032 (next_state != IP_VS_SCTP_S_ESTABLISHED)) {
1033 atomic_dec(&dest->activeconns);
1034 atomic_inc(&dest->inactconns);
1035 cp->flags |= IP_VS_CONN_F_INACTIVE;
1036 } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
1037 (next_state == IP_VS_SCTP_S_ESTABLISHED)) {
1038 atomic_inc(&dest->activeconns);
1039 atomic_dec(&dest->inactconns);
1040 cp->flags &= ~IP_VS_CONN_F_INACTIVE;
1041 }
1042 }
1043 }
1044
1045 cp->timeout = pp->timeout_table[cp->state = next_state];
1046
1047 return 1;
1048}
1049
1050static int
1051sctp_state_transition(struct ip_vs_conn *cp, int direction,
1052 const struct sk_buff *skb, struct ip_vs_protocol *pp)
1053{
1054 int ret = 0;
1055
1056 spin_lock(&cp->lock);
1057 ret = set_sctp_state(pp, cp, direction, skb);
1058 spin_unlock(&cp->lock);
1059
1060 return ret;
1061}
1062
1063/*
1064 * Hash table for SCTP application incarnations
1065 */
1066#define SCTP_APP_TAB_BITS 4
1067#define SCTP_APP_TAB_SIZE (1 << SCTP_APP_TAB_BITS)
1068#define SCTP_APP_TAB_MASK (SCTP_APP_TAB_SIZE - 1)
1069
1070static struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
1071static DEFINE_SPINLOCK(sctp_app_lock);
1072
1073static inline __u16 sctp_app_hashkey(__be16 port)
1074{
1075 return (((__force u16)port >> SCTP_APP_TAB_BITS) ^ (__force u16)port)
1076 & SCTP_APP_TAB_MASK;
1077}
1078
1079static int sctp_register_app(struct ip_vs_app *inc)
1080{
1081 struct ip_vs_app *i;
1082 __u16 hash;
1083 __be16 port = inc->port;
1084 int ret = 0;
1085
1086 hash = sctp_app_hashkey(port);
1087
1088 spin_lock_bh(&sctp_app_lock);
1089 list_for_each_entry(i, &sctp_apps[hash], p_list) {
1090 if (i->port == port) {
1091 ret = -EEXIST;
1092 goto out;
1093 }
1094 }
1095 list_add(&inc->p_list, &sctp_apps[hash]);
1096 atomic_inc(&ip_vs_protocol_sctp.appcnt);
1097out:
1098 spin_unlock_bh(&sctp_app_lock);
1099
1100 return ret;
1101}
1102
1103static void sctp_unregister_app(struct ip_vs_app *inc)
1104{
1105 spin_lock_bh(&sctp_app_lock);
1106 atomic_dec(&ip_vs_protocol_sctp.appcnt);
1107 list_del(&inc->p_list);
1108 spin_unlock_bh(&sctp_app_lock);
1109}
1110
/*
 * Bind connection @cp to a registered application incarnation whose
 * port matches the connection's virtual port.  Only NAT (masquerade)
 * connections are considered.  Returns 0, or the incarnation's
 * init_conn() result when one is bound and defines that hook.
 *
 * Locking: sctp_app_lock is dropped INSIDE the loop once a matching
 * incarnation has been pinned with ip_vs_app_inc_get(); the goto out
 * then deliberately bypasses the post-loop unlock.  The lock is held
 * exactly once on every path.
 */
static int sctp_app_conn_bind(struct ip_vs_conn *cp)
{
	int hash;
	struct ip_vs_app *inc;
	int result = 0;

	/* Default binding: bind app only for NAT */
	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
		return 0;
	/* Lookup application incarnations and bind the right one */
	hash = sctp_app_hashkey(cp->vport);

	spin_lock(&sctp_app_lock);
	list_for_each_entry(inc, &sctp_apps[hash], p_list) {
		if (inc->port == cp->vport) {
			/* Pin the incarnation; if that fails, fall through
			 * to the unbound path below. */
			if (unlikely(!ip_vs_app_inc_get(inc)))
				break;
			spin_unlock(&sctp_app_lock);

			IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->"
					"%s:%u to app %s on port %u\n",
					__func__,
					IP_VS_DBG_ADDR(cp->af, &cp->caddr),
					ntohs(cp->cport),
					IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
					ntohs(cp->vport),
					inc->name, ntohs(inc->port));
			cp->app = inc;
			if (inc->init_conn)
				result = inc->init_conn(inc, cp);
			goto out;
		}
	}
	spin_unlock(&sctp_app_lock);
out:
	return result;
}
1148
/*
 * ip_vs_protocol .init hook: initialize the application hash buckets
 * and install the default per-state timeout table.
 */
static void ip_vs_sctp_init(struct ip_vs_protocol *pp)
{
	IP_VS_INIT_HASH_TABLE(sctp_apps);
	pp->timeout_table = sctp_timeouts;
}
1154
1155
/*
 * ip_vs_protocol .exit hook.  Intentionally empty: nothing here is
 * dynamically allocated per protocol, so there is nothing to release.
 */
static void ip_vs_sctp_exit(struct ip_vs_protocol *pp)
{

}
1160
/*
 * IPVS protocol descriptor for SCTP: wires the handlers defined in
 * this file into the generic ip_vs_protocol framework.  Handlers not
 * defined in the visible portion of this file (conn_schedule,
 * conn_in_get, snat/dnat, csum_check, ...) are declared elsewhere in
 * this translation unit.
 */
struct ip_vs_protocol ip_vs_protocol_sctp = {
	.name = "SCTP",
	.protocol = IPPROTO_SCTP,
	.num_states = IP_VS_SCTP_S_LAST,
	.dont_defrag = 0,
	.appcnt = ATOMIC_INIT(0),
	.init = ip_vs_sctp_init,
	.exit = ip_vs_sctp_exit,
	.register_app = sctp_register_app,
	.unregister_app = sctp_unregister_app,
	.conn_schedule = sctp_conn_schedule,
	.conn_in_get = sctp_conn_in_get,
	.conn_out_get = sctp_conn_out_get,
	.snat_handler = sctp_snat_handler,
	.dnat_handler = sctp_dnat_handler,
	.csum_check = sctp_csum_check,
	.state_name = sctp_state_name,
	.state_transition = sctp_state_transition,
	.app_conn_bind = sctp_app_conn_bind,
	.debug_packet = ip_vs_tcpudp_debug_packet,
	.timeout_change = sctp_timeout_change,
	.set_state_timeout = sctp_set_state_timeout,
};
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index e177f0dc2084..8fb0ae616761 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -400,6 +400,11 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
400 flags |= IP_VS_CONN_F_INACTIVE; 400 flags |= IP_VS_CONN_F_INACTIVE;
401 else 401 else
402 flags &= ~IP_VS_CONN_F_INACTIVE; 402 flags &= ~IP_VS_CONN_F_INACTIVE;
403 } else if (s->protocol == IPPROTO_SCTP) {
404 if (state != IP_VS_SCTP_S_ESTABLISHED)
405 flags |= IP_VS_CONN_F_INACTIVE;
406 else
407 flags &= ~IP_VS_CONN_F_INACTIVE;
403 } 408 }
404 cp = ip_vs_conn_new(AF_INET, s->protocol, 409 cp = ip_vs_conn_new(AF_INET, s->protocol,
405 (union nf_inet_addr *)&s->caddr, 410 (union nf_inet_addr *)&s->caddr,
@@ -434,6 +439,15 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
434 atomic_dec(&dest->inactconns); 439 atomic_dec(&dest->inactconns);
435 cp->flags &= ~IP_VS_CONN_F_INACTIVE; 440 cp->flags &= ~IP_VS_CONN_F_INACTIVE;
436 } 441 }
442 } else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) &&
443 (cp->state != state)) {
444 dest = cp->dest;
445 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
446 (state != IP_VS_SCTP_S_ESTABLISHED)) {
447 atomic_dec(&dest->activeconns);
448 atomic_inc(&dest->inactconns);
449 cp->flags &= ~IP_VS_CONN_F_INACTIVE;
450 }
437 } 451 }
438 452
439 if (opt) 453 if (opt)
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index 6182e8ea0be7..3c115fc19784 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -24,6 +24,7 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/net.h> 26#include <linux/net.h>
27#include <linux/gcd.h>
27 28
28#include <net/ip_vs.h> 29#include <net/ip_vs.h>
29 30
@@ -38,20 +39,6 @@ struct ip_vs_wrr_mark {
38}; 39};
39 40
40 41
41/*
42 * Get the gcd of server weights
43 */
44static int gcd(int a, int b)
45{
46 int c;
47
48 while ((c = a % b)) {
49 a = b;
50 b = c;
51 }
52 return b;
53}
54
55static int ip_vs_wrr_gcd_weight(struct ip_vs_service *svc) 42static int ip_vs_wrr_gcd_weight(struct ip_vs_service *svc)
56{ 43{
57 struct ip_vs_dest *dest; 44 struct ip_vs_dest *dest;
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 30b3189bd29c..223b5018c7dc 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -311,7 +311,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
311 mtu = dst_mtu(&rt->u.dst); 311 mtu = dst_mtu(&rt->u.dst);
312 if (skb->len > mtu) { 312 if (skb->len > mtu) {
313 dst_release(&rt->u.dst); 313 dst_release(&rt->u.dst);
314 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 314 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
315 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 315 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
316 goto tx_error; 316 goto tx_error;
317 } 317 }
@@ -454,7 +454,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
454 mtu = dst_mtu(&rt->u.dst); 454 mtu = dst_mtu(&rt->u.dst);
455 if (skb->len > mtu) { 455 if (skb->len > mtu) {
456 dst_release(&rt->u.dst); 456 dst_release(&rt->u.dst);
457 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 457 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
458 IP_VS_DBG_RL_PKT(0, pp, skb, 0, 458 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
459 "ip_vs_nat_xmit_v6(): frag needed for"); 459 "ip_vs_nat_xmit_v6(): frag needed for");
460 goto tx_error; 460 goto tx_error;
@@ -672,7 +672,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
672 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); 672 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
673 673
674 if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) { 674 if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
675 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 675 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
676 dst_release(&rt->u.dst); 676 dst_release(&rt->u.dst);
677 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 677 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
678 goto tx_error; 678 goto tx_error;
@@ -814,7 +814,7 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
814 /* MTU checking */ 814 /* MTU checking */
815 mtu = dst_mtu(&rt->u.dst); 815 mtu = dst_mtu(&rt->u.dst);
816 if (skb->len > mtu) { 816 if (skb->len > mtu) {
817 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 817 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
818 dst_release(&rt->u.dst); 818 dst_release(&rt->u.dst);
819 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 819 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
820 goto tx_error; 820 goto tx_error;
@@ -965,7 +965,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
965 mtu = dst_mtu(&rt->u.dst); 965 mtu = dst_mtu(&rt->u.dst);
966 if (skb->len > mtu) { 966 if (skb->len > mtu) {
967 dst_release(&rt->u.dst); 967 dst_release(&rt->u.dst);
968 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 968 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
969 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 969 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
970 goto tx_error; 970 goto tx_error;
971 } 971 }
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0e98c3282d42..0c9bbe93cc16 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -30,6 +30,7 @@
30#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/socket.h> 31#include <linux/socket.h>
32#include <linux/mm.h> 32#include <linux/mm.h>
33#include <linux/nsproxy.h>
33#include <linux/rculist_nulls.h> 34#include <linux/rculist_nulls.h>
34 35
35#include <net/netfilter/nf_conntrack.h> 36#include <net/netfilter/nf_conntrack.h>
@@ -41,6 +42,7 @@
41#include <net/netfilter/nf_conntrack_extend.h> 42#include <net/netfilter/nf_conntrack_extend.h>
42#include <net/netfilter/nf_conntrack_acct.h> 43#include <net/netfilter/nf_conntrack_acct.h>
43#include <net/netfilter/nf_conntrack_ecache.h> 44#include <net/netfilter/nf_conntrack_ecache.h>
45#include <net/netfilter/nf_conntrack_zones.h>
44#include <net/netfilter/nf_nat.h> 46#include <net/netfilter/nf_nat.h>
45#include <net/netfilter/nf_nat_core.h> 47#include <net/netfilter/nf_nat_core.h>
46 48
@@ -63,13 +65,11 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max);
63struct nf_conn nf_conntrack_untracked __read_mostly; 65struct nf_conn nf_conntrack_untracked __read_mostly;
64EXPORT_SYMBOL_GPL(nf_conntrack_untracked); 66EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
65 67
66static struct kmem_cache *nf_conntrack_cachep __read_mostly;
67
68static int nf_conntrack_hash_rnd_initted; 68static int nf_conntrack_hash_rnd_initted;
69static unsigned int nf_conntrack_hash_rnd; 69static unsigned int nf_conntrack_hash_rnd;
70 70
71static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, 71static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
72 unsigned int size, unsigned int rnd) 72 u16 zone, unsigned int size, unsigned int rnd)
73{ 73{
74 unsigned int n; 74 unsigned int n;
75 u_int32_t h; 75 u_int32_t h;
@@ -80,15 +80,16 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
80 */ 80 */
81 n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32); 81 n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
82 h = jhash2((u32 *)tuple, n, 82 h = jhash2((u32 *)tuple, n,
83 rnd ^ (((__force __u16)tuple->dst.u.all << 16) | 83 zone ^ rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
84 tuple->dst.protonum)); 84 tuple->dst.protonum));
85 85
86 return ((u64)h * size) >> 32; 86 return ((u64)h * size) >> 32;
87} 87}
88 88
89static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple) 89static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
90 const struct nf_conntrack_tuple *tuple)
90{ 91{
91 return __hash_conntrack(tuple, nf_conntrack_htable_size, 92 return __hash_conntrack(tuple, zone, net->ct.htable_size,
92 nf_conntrack_hash_rnd); 93 nf_conntrack_hash_rnd);
93} 94}
94 95
@@ -292,11 +293,12 @@ static void death_by_timeout(unsigned long ul_conntrack)
292 * - Caller must lock nf_conntrack_lock before calling this function 293 * - Caller must lock nf_conntrack_lock before calling this function
293 */ 294 */
294struct nf_conntrack_tuple_hash * 295struct nf_conntrack_tuple_hash *
295__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple) 296__nf_conntrack_find(struct net *net, u16 zone,
297 const struct nf_conntrack_tuple *tuple)
296{ 298{
297 struct nf_conntrack_tuple_hash *h; 299 struct nf_conntrack_tuple_hash *h;
298 struct hlist_nulls_node *n; 300 struct hlist_nulls_node *n;
299 unsigned int hash = hash_conntrack(tuple); 301 unsigned int hash = hash_conntrack(net, zone, tuple);
300 302
301 /* Disable BHs the entire time since we normally need to disable them 303 /* Disable BHs the entire time since we normally need to disable them
302 * at least once for the stats anyway. 304 * at least once for the stats anyway.
@@ -304,7 +306,8 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
304 local_bh_disable(); 306 local_bh_disable();
305begin: 307begin:
306 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { 308 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
307 if (nf_ct_tuple_equal(tuple, &h->tuple)) { 309 if (nf_ct_tuple_equal(tuple, &h->tuple) &&
310 nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
308 NF_CT_STAT_INC(net, found); 311 NF_CT_STAT_INC(net, found);
309 local_bh_enable(); 312 local_bh_enable();
310 return h; 313 return h;
@@ -326,21 +329,23 @@ EXPORT_SYMBOL_GPL(__nf_conntrack_find);
326 329
327/* Find a connection corresponding to a tuple. */ 330/* Find a connection corresponding to a tuple. */
328struct nf_conntrack_tuple_hash * 331struct nf_conntrack_tuple_hash *
329nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple) 332nf_conntrack_find_get(struct net *net, u16 zone,
333 const struct nf_conntrack_tuple *tuple)
330{ 334{
331 struct nf_conntrack_tuple_hash *h; 335 struct nf_conntrack_tuple_hash *h;
332 struct nf_conn *ct; 336 struct nf_conn *ct;
333 337
334 rcu_read_lock(); 338 rcu_read_lock();
335begin: 339begin:
336 h = __nf_conntrack_find(net, tuple); 340 h = __nf_conntrack_find(net, zone, tuple);
337 if (h) { 341 if (h) {
338 ct = nf_ct_tuplehash_to_ctrack(h); 342 ct = nf_ct_tuplehash_to_ctrack(h);
339 if (unlikely(nf_ct_is_dying(ct) || 343 if (unlikely(nf_ct_is_dying(ct) ||
340 !atomic_inc_not_zero(&ct->ct_general.use))) 344 !atomic_inc_not_zero(&ct->ct_general.use)))
341 h = NULL; 345 h = NULL;
342 else { 346 else {
343 if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) { 347 if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
348 nf_ct_zone(ct) != zone)) {
344 nf_ct_put(ct); 349 nf_ct_put(ct);
345 goto begin; 350 goto begin;
346 } 351 }
@@ -366,10 +371,13 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
366 371
367void nf_conntrack_hash_insert(struct nf_conn *ct) 372void nf_conntrack_hash_insert(struct nf_conn *ct)
368{ 373{
374 struct net *net = nf_ct_net(ct);
369 unsigned int hash, repl_hash; 375 unsigned int hash, repl_hash;
376 u16 zone;
370 377
371 hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 378 zone = nf_ct_zone(ct);
372 repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 379 hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
380 repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
373 381
374 __nf_conntrack_hash_insert(ct, hash, repl_hash); 382 __nf_conntrack_hash_insert(ct, hash, repl_hash);
375} 383}
@@ -386,6 +394,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
386 struct hlist_nulls_node *n; 394 struct hlist_nulls_node *n;
387 enum ip_conntrack_info ctinfo; 395 enum ip_conntrack_info ctinfo;
388 struct net *net; 396 struct net *net;
397 u16 zone;
389 398
390 ct = nf_ct_get(skb, &ctinfo); 399 ct = nf_ct_get(skb, &ctinfo);
391 net = nf_ct_net(ct); 400 net = nf_ct_net(ct);
@@ -397,8 +406,9 @@ __nf_conntrack_confirm(struct sk_buff *skb)
397 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) 406 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
398 return NF_ACCEPT; 407 return NF_ACCEPT;
399 408
400 hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 409 zone = nf_ct_zone(ct);
401 repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 410 hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
411 repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
402 412
403 /* We're not in hash table, and we refuse to set up related 413 /* We're not in hash table, and we refuse to set up related
404 connections for unconfirmed conns. But packet copies and 414 connections for unconfirmed conns. But packet copies and
@@ -417,11 +427,13 @@ __nf_conntrack_confirm(struct sk_buff *skb)
417 not in the hash. If there is, we lost race. */ 427 not in the hash. If there is, we lost race. */
418 hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) 428 hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
419 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, 429 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
420 &h->tuple)) 430 &h->tuple) &&
431 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
421 goto out; 432 goto out;
422 hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode) 433 hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
423 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, 434 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
424 &h->tuple)) 435 &h->tuple) &&
436 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
425 goto out; 437 goto out;
426 438
427 /* Remove from unconfirmed list */ 439 /* Remove from unconfirmed list */
@@ -468,15 +480,19 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
468 struct net *net = nf_ct_net(ignored_conntrack); 480 struct net *net = nf_ct_net(ignored_conntrack);
469 struct nf_conntrack_tuple_hash *h; 481 struct nf_conntrack_tuple_hash *h;
470 struct hlist_nulls_node *n; 482 struct hlist_nulls_node *n;
471 unsigned int hash = hash_conntrack(tuple); 483 struct nf_conn *ct;
484 u16 zone = nf_ct_zone(ignored_conntrack);
485 unsigned int hash = hash_conntrack(net, zone, tuple);
472 486
473 /* Disable BHs the entire time since we need to disable them at 487 /* Disable BHs the entire time since we need to disable them at
474 * least once for the stats anyway. 488 * least once for the stats anyway.
475 */ 489 */
476 rcu_read_lock_bh(); 490 rcu_read_lock_bh();
477 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { 491 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
478 if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack && 492 ct = nf_ct_tuplehash_to_ctrack(h);
479 nf_ct_tuple_equal(tuple, &h->tuple)) { 493 if (ct != ignored_conntrack &&
494 nf_ct_tuple_equal(tuple, &h->tuple) &&
495 nf_ct_zone(ct) == zone) {
480 NF_CT_STAT_INC(net, found); 496 NF_CT_STAT_INC(net, found);
481 rcu_read_unlock_bh(); 497 rcu_read_unlock_bh();
482 return 1; 498 return 1;
@@ -503,7 +519,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
503 int dropped = 0; 519 int dropped = 0;
504 520
505 rcu_read_lock(); 521 rcu_read_lock();
506 for (i = 0; i < nf_conntrack_htable_size; i++) { 522 for (i = 0; i < net->ct.htable_size; i++) {
507 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], 523 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
508 hnnode) { 524 hnnode) {
509 tmp = nf_ct_tuplehash_to_ctrack(h); 525 tmp = nf_ct_tuplehash_to_ctrack(h);
@@ -523,7 +539,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
523 if (cnt >= NF_CT_EVICTION_RANGE) 539 if (cnt >= NF_CT_EVICTION_RANGE)
524 break; 540 break;
525 541
526 hash = (hash + 1) % nf_conntrack_htable_size; 542 hash = (hash + 1) % net->ct.htable_size;
527 } 543 }
528 rcu_read_unlock(); 544 rcu_read_unlock();
529 545
@@ -539,7 +555,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
539 return dropped; 555 return dropped;
540} 556}
541 557
542struct nf_conn *nf_conntrack_alloc(struct net *net, 558struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
543 const struct nf_conntrack_tuple *orig, 559 const struct nf_conntrack_tuple *orig,
544 const struct nf_conntrack_tuple *repl, 560 const struct nf_conntrack_tuple *repl,
545 gfp_t gfp) 561 gfp_t gfp)
@@ -557,7 +573,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
557 573
558 if (nf_conntrack_max && 574 if (nf_conntrack_max &&
559 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { 575 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
560 unsigned int hash = hash_conntrack(orig); 576 unsigned int hash = hash_conntrack(net, zone, orig);
561 if (!early_drop(net, hash)) { 577 if (!early_drop(net, hash)) {
562 atomic_dec(&net->ct.count); 578 atomic_dec(&net->ct.count);
563 if (net_ratelimit()) 579 if (net_ratelimit())
@@ -572,7 +588,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
572 * Do not use kmem_cache_zalloc(), as this cache uses 588 * Do not use kmem_cache_zalloc(), as this cache uses
573 * SLAB_DESTROY_BY_RCU. 589 * SLAB_DESTROY_BY_RCU.
574 */ 590 */
575 ct = kmem_cache_alloc(nf_conntrack_cachep, gfp); 591 ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
576 if (ct == NULL) { 592 if (ct == NULL) {
577 pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n"); 593 pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
578 atomic_dec(&net->ct.count); 594 atomic_dec(&net->ct.count);
@@ -594,13 +610,28 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
594#ifdef CONFIG_NET_NS 610#ifdef CONFIG_NET_NS
595 ct->ct_net = net; 611 ct->ct_net = net;
596#endif 612#endif
597 613#ifdef CONFIG_NF_CONNTRACK_ZONES
614 if (zone) {
615 struct nf_conntrack_zone *nf_ct_zone;
616
617 nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
618 if (!nf_ct_zone)
619 goto out_free;
620 nf_ct_zone->id = zone;
621 }
622#endif
598 /* 623 /*
599 * changes to lookup keys must be done before setting refcnt to 1 624 * changes to lookup keys must be done before setting refcnt to 1
600 */ 625 */
601 smp_wmb(); 626 smp_wmb();
602 atomic_set(&ct->ct_general.use, 1); 627 atomic_set(&ct->ct_general.use, 1);
603 return ct; 628 return ct;
629
630#ifdef CONFIG_NF_CONNTRACK_ZONES
631out_free:
632 kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
633 return ERR_PTR(-ENOMEM);
634#endif
604} 635}
605EXPORT_SYMBOL_GPL(nf_conntrack_alloc); 636EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
606 637
@@ -611,14 +642,14 @@ void nf_conntrack_free(struct nf_conn *ct)
611 nf_ct_ext_destroy(ct); 642 nf_ct_ext_destroy(ct);
612 atomic_dec(&net->ct.count); 643 atomic_dec(&net->ct.count);
613 nf_ct_ext_free(ct); 644 nf_ct_ext_free(ct);
614 kmem_cache_free(nf_conntrack_cachep, ct); 645 kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
615} 646}
616EXPORT_SYMBOL_GPL(nf_conntrack_free); 647EXPORT_SYMBOL_GPL(nf_conntrack_free);
617 648
618/* Allocate a new conntrack: we return -ENOMEM if classification 649/* Allocate a new conntrack: we return -ENOMEM if classification
619 failed due to stress. Otherwise it really is unclassifiable. */ 650 failed due to stress. Otherwise it really is unclassifiable. */
620static struct nf_conntrack_tuple_hash * 651static struct nf_conntrack_tuple_hash *
621init_conntrack(struct net *net, 652init_conntrack(struct net *net, struct nf_conn *tmpl,
622 const struct nf_conntrack_tuple *tuple, 653 const struct nf_conntrack_tuple *tuple,
623 struct nf_conntrack_l3proto *l3proto, 654 struct nf_conntrack_l3proto *l3proto,
624 struct nf_conntrack_l4proto *l4proto, 655 struct nf_conntrack_l4proto *l4proto,
@@ -628,14 +659,16 @@ init_conntrack(struct net *net,
628 struct nf_conn *ct; 659 struct nf_conn *ct;
629 struct nf_conn_help *help; 660 struct nf_conn_help *help;
630 struct nf_conntrack_tuple repl_tuple; 661 struct nf_conntrack_tuple repl_tuple;
662 struct nf_conntrack_ecache *ecache;
631 struct nf_conntrack_expect *exp; 663 struct nf_conntrack_expect *exp;
664 u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
632 665
633 if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) { 666 if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
634 pr_debug("Can't invert tuple.\n"); 667 pr_debug("Can't invert tuple.\n");
635 return NULL; 668 return NULL;
636 } 669 }
637 670
638 ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC); 671 ct = nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC);
639 if (IS_ERR(ct)) { 672 if (IS_ERR(ct)) {
640 pr_debug("Can't allocate conntrack.\n"); 673 pr_debug("Can't allocate conntrack.\n");
641 return (struct nf_conntrack_tuple_hash *)ct; 674 return (struct nf_conntrack_tuple_hash *)ct;
@@ -648,10 +681,14 @@ init_conntrack(struct net *net,
648 } 681 }
649 682
650 nf_ct_acct_ext_add(ct, GFP_ATOMIC); 683 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
651 nf_ct_ecache_ext_add(ct, GFP_ATOMIC); 684
685 ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
686 nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
687 ecache ? ecache->expmask : 0,
688 GFP_ATOMIC);
652 689
653 spin_lock_bh(&nf_conntrack_lock); 690 spin_lock_bh(&nf_conntrack_lock);
654 exp = nf_ct_find_expectation(net, tuple); 691 exp = nf_ct_find_expectation(net, zone, tuple);
655 if (exp) { 692 if (exp) {
656 pr_debug("conntrack: expectation arrives ct=%p exp=%p\n", 693 pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
657 ct, exp); 694 ct, exp);
@@ -673,7 +710,7 @@ init_conntrack(struct net *net,
673 nf_conntrack_get(&ct->master->ct_general); 710 nf_conntrack_get(&ct->master->ct_general);
674 NF_CT_STAT_INC(net, expect_new); 711 NF_CT_STAT_INC(net, expect_new);
675 } else { 712 } else {
676 __nf_ct_try_assign_helper(ct, GFP_ATOMIC); 713 __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
677 NF_CT_STAT_INC(net, new); 714 NF_CT_STAT_INC(net, new);
678 } 715 }
679 716
@@ -694,7 +731,7 @@ init_conntrack(struct net *net,
694 731
695/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */ 732/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
696static inline struct nf_conn * 733static inline struct nf_conn *
697resolve_normal_ct(struct net *net, 734resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
698 struct sk_buff *skb, 735 struct sk_buff *skb,
699 unsigned int dataoff, 736 unsigned int dataoff,
700 u_int16_t l3num, 737 u_int16_t l3num,
@@ -707,6 +744,7 @@ resolve_normal_ct(struct net *net,
707 struct nf_conntrack_tuple tuple; 744 struct nf_conntrack_tuple tuple;
708 struct nf_conntrack_tuple_hash *h; 745 struct nf_conntrack_tuple_hash *h;
709 struct nf_conn *ct; 746 struct nf_conn *ct;
747 u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
710 748
711 if (!nf_ct_get_tuple(skb, skb_network_offset(skb), 749 if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
712 dataoff, l3num, protonum, &tuple, l3proto, 750 dataoff, l3num, protonum, &tuple, l3proto,
@@ -716,9 +754,10 @@ resolve_normal_ct(struct net *net,
716 } 754 }
717 755
718 /* look for tuple match */ 756 /* look for tuple match */
719 h = nf_conntrack_find_get(net, &tuple); 757 h = nf_conntrack_find_get(net, zone, &tuple);
720 if (!h) { 758 if (!h) {
721 h = init_conntrack(net, &tuple, l3proto, l4proto, skb, dataoff); 759 h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
760 skb, dataoff);
722 if (!h) 761 if (!h)
723 return NULL; 762 return NULL;
724 if (IS_ERR(h)) 763 if (IS_ERR(h))
@@ -755,7 +794,7 @@ unsigned int
755nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, 794nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
756 struct sk_buff *skb) 795 struct sk_buff *skb)
757{ 796{
758 struct nf_conn *ct; 797 struct nf_conn *ct, *tmpl = NULL;
759 enum ip_conntrack_info ctinfo; 798 enum ip_conntrack_info ctinfo;
760 struct nf_conntrack_l3proto *l3proto; 799 struct nf_conntrack_l3proto *l3proto;
761 struct nf_conntrack_l4proto *l4proto; 800 struct nf_conntrack_l4proto *l4proto;
@@ -764,10 +803,14 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
764 int set_reply = 0; 803 int set_reply = 0;
765 int ret; 804 int ret;
766 805
767 /* Previously seen (loopback or untracked)? Ignore. */
768 if (skb->nfct) { 806 if (skb->nfct) {
769 NF_CT_STAT_INC_ATOMIC(net, ignore); 807 /* Previously seen (loopback or untracked)? Ignore. */
770 return NF_ACCEPT; 808 tmpl = (struct nf_conn *)skb->nfct;
809 if (!nf_ct_is_template(tmpl)) {
810 NF_CT_STAT_INC_ATOMIC(net, ignore);
811 return NF_ACCEPT;
812 }
813 skb->nfct = NULL;
771 } 814 }
772 815
773 /* rcu_read_lock()ed by nf_hook_slow */ 816 /* rcu_read_lock()ed by nf_hook_slow */
@@ -778,7 +821,8 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
778 pr_debug("not prepared to track yet or error occured\n"); 821 pr_debug("not prepared to track yet or error occured\n");
779 NF_CT_STAT_INC_ATOMIC(net, error); 822 NF_CT_STAT_INC_ATOMIC(net, error);
780 NF_CT_STAT_INC_ATOMIC(net, invalid); 823 NF_CT_STAT_INC_ATOMIC(net, invalid);
781 return -ret; 824 ret = -ret;
825 goto out;
782 } 826 }
783 827
784 l4proto = __nf_ct_l4proto_find(pf, protonum); 828 l4proto = __nf_ct_l4proto_find(pf, protonum);
@@ -787,26 +831,30 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
787 * inverse of the return code tells to the netfilter 831 * inverse of the return code tells to the netfilter
788 * core what to do with the packet. */ 832 * core what to do with the packet. */
789 if (l4proto->error != NULL) { 833 if (l4proto->error != NULL) {
790 ret = l4proto->error(net, skb, dataoff, &ctinfo, pf, hooknum); 834 ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
835 pf, hooknum);
791 if (ret <= 0) { 836 if (ret <= 0) {
792 NF_CT_STAT_INC_ATOMIC(net, error); 837 NF_CT_STAT_INC_ATOMIC(net, error);
793 NF_CT_STAT_INC_ATOMIC(net, invalid); 838 NF_CT_STAT_INC_ATOMIC(net, invalid);
794 return -ret; 839 ret = -ret;
840 goto out;
795 } 841 }
796 } 842 }
797 843
798 ct = resolve_normal_ct(net, skb, dataoff, pf, protonum, 844 ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
799 l3proto, l4proto, &set_reply, &ctinfo); 845 l3proto, l4proto, &set_reply, &ctinfo);
800 if (!ct) { 846 if (!ct) {
801 /* Not valid part of a connection */ 847 /* Not valid part of a connection */
802 NF_CT_STAT_INC_ATOMIC(net, invalid); 848 NF_CT_STAT_INC_ATOMIC(net, invalid);
803 return NF_ACCEPT; 849 ret = NF_ACCEPT;
850 goto out;
804 } 851 }
805 852
806 if (IS_ERR(ct)) { 853 if (IS_ERR(ct)) {
807 /* Too stressed to deal. */ 854 /* Too stressed to deal. */
808 NF_CT_STAT_INC_ATOMIC(net, drop); 855 NF_CT_STAT_INC_ATOMIC(net, drop);
809 return NF_DROP; 856 ret = NF_DROP;
857 goto out;
810 } 858 }
811 859
812 NF_CT_ASSERT(skb->nfct); 860 NF_CT_ASSERT(skb->nfct);
@@ -821,11 +869,15 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
821 NF_CT_STAT_INC_ATOMIC(net, invalid); 869 NF_CT_STAT_INC_ATOMIC(net, invalid);
822 if (ret == -NF_DROP) 870 if (ret == -NF_DROP)
823 NF_CT_STAT_INC_ATOMIC(net, drop); 871 NF_CT_STAT_INC_ATOMIC(net, drop);
824 return -ret; 872 ret = -ret;
873 goto out;
825 } 874 }
826 875
827 if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) 876 if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
828 nf_conntrack_event_cache(IPCT_STATUS, ct); 877 nf_conntrack_event_cache(IPCT_REPLY, ct);
878out:
879 if (tmpl)
880 nf_ct_put(tmpl);
829 881
830 return ret; 882 return ret;
831} 883}
@@ -864,7 +916,7 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
864 return; 916 return;
865 917
866 rcu_read_lock(); 918 rcu_read_lock();
867 __nf_ct_try_assign_helper(ct, GFP_ATOMIC); 919 __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
868 rcu_read_unlock(); 920 rcu_read_unlock();
869} 921}
870EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply); 922EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
@@ -938,6 +990,14 @@ bool __nf_ct_kill_acct(struct nf_conn *ct,
938} 990}
939EXPORT_SYMBOL_GPL(__nf_ct_kill_acct); 991EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
940 992
993#ifdef CONFIG_NF_CONNTRACK_ZONES
994static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
995 .len = sizeof(struct nf_conntrack_zone),
996 .align = __alignof__(struct nf_conntrack_zone),
997 .id = NF_CT_EXT_ZONE,
998};
999#endif
1000
941#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 1001#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
942 1002
943#include <linux/netfilter/nfnetlink.h> 1003#include <linux/netfilter/nfnetlink.h>
@@ -1014,7 +1074,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
1014 struct hlist_nulls_node *n; 1074 struct hlist_nulls_node *n;
1015 1075
1016 spin_lock_bh(&nf_conntrack_lock); 1076 spin_lock_bh(&nf_conntrack_lock);
1017 for (; *bucket < nf_conntrack_htable_size; (*bucket)++) { 1077 for (; *bucket < net->ct.htable_size; (*bucket)++) {
1018 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { 1078 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
1019 ct = nf_ct_tuplehash_to_ctrack(h); 1079 ct = nf_ct_tuplehash_to_ctrack(h);
1020 if (iter(ct, data)) 1080 if (iter(ct, data))
@@ -1113,9 +1173,15 @@ static void nf_ct_release_dying_list(struct net *net)
1113 1173
1114static void nf_conntrack_cleanup_init_net(void) 1174static void nf_conntrack_cleanup_init_net(void)
1115{ 1175{
1176 /* wait until all references to nf_conntrack_untracked are dropped */
1177 while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
1178 schedule();
1179
1116 nf_conntrack_helper_fini(); 1180 nf_conntrack_helper_fini();
1117 nf_conntrack_proto_fini(); 1181 nf_conntrack_proto_fini();
1118 kmem_cache_destroy(nf_conntrack_cachep); 1182#ifdef CONFIG_NF_CONNTRACK_ZONES
1183 nf_ct_extend_unregister(&nf_ct_zone_extend);
1184#endif
1119} 1185}
1120 1186
1121static void nf_conntrack_cleanup_net(struct net *net) 1187static void nf_conntrack_cleanup_net(struct net *net)
@@ -1127,15 +1193,14 @@ static void nf_conntrack_cleanup_net(struct net *net)
1127 schedule(); 1193 schedule();
1128 goto i_see_dead_people; 1194 goto i_see_dead_people;
1129 } 1195 }
1130 /* wait until all references to nf_conntrack_untracked are dropped */
1131 while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
1132 schedule();
1133 1196
1134 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, 1197 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
1135 nf_conntrack_htable_size); 1198 net->ct.htable_size);
1136 nf_conntrack_ecache_fini(net); 1199 nf_conntrack_ecache_fini(net);
1137 nf_conntrack_acct_fini(net); 1200 nf_conntrack_acct_fini(net);
1138 nf_conntrack_expect_fini(net); 1201 nf_conntrack_expect_fini(net);
1202 kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1203 kfree(net->ct.slabname);
1139 free_percpu(net->ct.stat); 1204 free_percpu(net->ct.stat);
1140} 1205}
1141 1206
@@ -1190,9 +1255,12 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1190{ 1255{
1191 int i, bucket, vmalloced, old_vmalloced; 1256 int i, bucket, vmalloced, old_vmalloced;
1192 unsigned int hashsize, old_size; 1257 unsigned int hashsize, old_size;
1193 int rnd;
1194 struct hlist_nulls_head *hash, *old_hash; 1258 struct hlist_nulls_head *hash, *old_hash;
1195 struct nf_conntrack_tuple_hash *h; 1259 struct nf_conntrack_tuple_hash *h;
1260 struct nf_conn *ct;
1261
1262 if (current->nsproxy->net_ns != &init_net)
1263 return -EOPNOTSUPP;
1196 1264
1197 /* On boot, we can set this without any fancy locking. */ 1265 /* On boot, we can set this without any fancy locking. */
1198 if (!nf_conntrack_htable_size) 1266 if (!nf_conntrack_htable_size)
@@ -1206,33 +1274,31 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1206 if (!hash) 1274 if (!hash)
1207 return -ENOMEM; 1275 return -ENOMEM;
1208 1276
1209 /* We have to rehahs for the new table anyway, so we also can
1210 * use a newrandom seed */
1211 get_random_bytes(&rnd, sizeof(rnd));
1212
1213 /* Lookups in the old hash might happen in parallel, which means we 1277 /* Lookups in the old hash might happen in parallel, which means we
1214 * might get false negatives during connection lookup. New connections 1278 * might get false negatives during connection lookup. New connections
1215 * created because of a false negative won't make it into the hash 1279 * created because of a false negative won't make it into the hash
1216 * though since that required taking the lock. 1280 * though since that required taking the lock.
1217 */ 1281 */
1218 spin_lock_bh(&nf_conntrack_lock); 1282 spin_lock_bh(&nf_conntrack_lock);
1219 for (i = 0; i < nf_conntrack_htable_size; i++) { 1283 for (i = 0; i < init_net.ct.htable_size; i++) {
1220 while (!hlist_nulls_empty(&init_net.ct.hash[i])) { 1284 while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
1221 h = hlist_nulls_entry(init_net.ct.hash[i].first, 1285 h = hlist_nulls_entry(init_net.ct.hash[i].first,
1222 struct nf_conntrack_tuple_hash, hnnode); 1286 struct nf_conntrack_tuple_hash, hnnode);
1287 ct = nf_ct_tuplehash_to_ctrack(h);
1223 hlist_nulls_del_rcu(&h->hnnode); 1288 hlist_nulls_del_rcu(&h->hnnode);
1224 bucket = __hash_conntrack(&h->tuple, hashsize, rnd); 1289 bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
1290 hashsize,
1291 nf_conntrack_hash_rnd);
1225 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); 1292 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1226 } 1293 }
1227 } 1294 }
1228 old_size = nf_conntrack_htable_size; 1295 old_size = init_net.ct.htable_size;
1229 old_vmalloced = init_net.ct.hash_vmalloc; 1296 old_vmalloced = init_net.ct.hash_vmalloc;
1230 old_hash = init_net.ct.hash; 1297 old_hash = init_net.ct.hash;
1231 1298
1232 nf_conntrack_htable_size = hashsize; 1299 init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
1233 init_net.ct.hash_vmalloc = vmalloced; 1300 init_net.ct.hash_vmalloc = vmalloced;
1234 init_net.ct.hash = hash; 1301 init_net.ct.hash = hash;
1235 nf_conntrack_hash_rnd = rnd;
1236 spin_unlock_bh(&nf_conntrack_lock); 1302 spin_unlock_bh(&nf_conntrack_lock);
1237 1303
1238 nf_ct_free_hashtable(old_hash, old_vmalloced, old_size); 1304 nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
@@ -1271,15 +1337,6 @@ static int nf_conntrack_init_init_net(void)
1271 NF_CONNTRACK_VERSION, nf_conntrack_htable_size, 1337 NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
1272 nf_conntrack_max); 1338 nf_conntrack_max);
1273 1339
1274 nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
1275 sizeof(struct nf_conn),
1276 0, SLAB_DESTROY_BY_RCU, NULL);
1277 if (!nf_conntrack_cachep) {
1278 printk(KERN_ERR "Unable to create nf_conn slab cache\n");
1279 ret = -ENOMEM;
1280 goto err_cache;
1281 }
1282
1283 ret = nf_conntrack_proto_init(); 1340 ret = nf_conntrack_proto_init();
1284 if (ret < 0) 1341 if (ret < 0)
1285 goto err_proto; 1342 goto err_proto;
@@ -1288,13 +1345,28 @@ static int nf_conntrack_init_init_net(void)
1288 if (ret < 0) 1345 if (ret < 0)
1289 goto err_helper; 1346 goto err_helper;
1290 1347
1348#ifdef CONFIG_NF_CONNTRACK_ZONES
1349 ret = nf_ct_extend_register(&nf_ct_zone_extend);
1350 if (ret < 0)
1351 goto err_extend;
1352#endif
1353 /* Set up fake conntrack: to never be deleted, not in any hashes */
1354#ifdef CONFIG_NET_NS
1355 nf_conntrack_untracked.ct_net = &init_net;
1356#endif
1357 atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
1358 /* - and look it like as a confirmed connection */
1359 set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
1360
1291 return 0; 1361 return 0;
1292 1362
1363#ifdef CONFIG_NF_CONNTRACK_ZONES
1364err_extend:
1365 nf_conntrack_helper_fini();
1366#endif
1293err_helper: 1367err_helper:
1294 nf_conntrack_proto_fini(); 1368 nf_conntrack_proto_fini();
1295err_proto: 1369err_proto:
1296 kmem_cache_destroy(nf_conntrack_cachep);
1297err_cache:
1298 return ret; 1370 return ret;
1299} 1371}
1300 1372
@@ -1316,7 +1388,24 @@ static int nf_conntrack_init_net(struct net *net)
1316 ret = -ENOMEM; 1388 ret = -ENOMEM;
1317 goto err_stat; 1389 goto err_stat;
1318 } 1390 }
1319 net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1391
1392 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
1393 if (!net->ct.slabname) {
1394 ret = -ENOMEM;
1395 goto err_slabname;
1396 }
1397
1398 net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
1399 sizeof(struct nf_conn), 0,
1400 SLAB_DESTROY_BY_RCU, NULL);
1401 if (!net->ct.nf_conntrack_cachep) {
1402 printk(KERN_ERR "Unable to create nf_conn slab cache\n");
1403 ret = -ENOMEM;
1404 goto err_cache;
1405 }
1406
1407 net->ct.htable_size = nf_conntrack_htable_size;
1408 net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
1320 &net->ct.hash_vmalloc, 1); 1409 &net->ct.hash_vmalloc, 1);
1321 if (!net->ct.hash) { 1410 if (!net->ct.hash) {
1322 ret = -ENOMEM; 1411 ret = -ENOMEM;
@@ -1333,15 +1422,6 @@ static int nf_conntrack_init_net(struct net *net)
1333 if (ret < 0) 1422 if (ret < 0)
1334 goto err_ecache; 1423 goto err_ecache;
1335 1424
1336 /* Set up fake conntrack:
1337 - to never be deleted, not in any hashes */
1338#ifdef CONFIG_NET_NS
1339 nf_conntrack_untracked.ct_net = &init_net;
1340#endif
1341 atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
1342 /* - and look it like as a confirmed connection */
1343 set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
1344
1345 return 0; 1425 return 0;
1346 1426
1347err_ecache: 1427err_ecache:
@@ -1350,8 +1430,12 @@ err_acct:
1350 nf_conntrack_expect_fini(net); 1430 nf_conntrack_expect_fini(net);
1351err_expect: 1431err_expect:
1352 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, 1432 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
1353 nf_conntrack_htable_size); 1433 net->ct.htable_size);
1354err_hash: 1434err_hash:
1435 kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1436err_cache:
1437 kfree(net->ct.slabname);
1438err_slabname:
1355 free_percpu(net->ct.stat); 1439 free_percpu(net->ct.stat);
1356err_stat: 1440err_stat:
1357 return ret; 1441 return ret;
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index fdf5d2a1d9b4..acb29ccaa41f 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -27,6 +27,7 @@
27#include <net/netfilter/nf_conntrack_expect.h> 27#include <net/netfilter/nf_conntrack_expect.h>
28#include <net/netfilter/nf_conntrack_helper.h> 28#include <net/netfilter/nf_conntrack_helper.h>
29#include <net/netfilter/nf_conntrack_tuple.h> 29#include <net/netfilter/nf_conntrack_tuple.h>
30#include <net/netfilter/nf_conntrack_zones.h>
30 31
31unsigned int nf_ct_expect_hsize __read_mostly; 32unsigned int nf_ct_expect_hsize __read_mostly;
32EXPORT_SYMBOL_GPL(nf_ct_expect_hsize); 33EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);
@@ -84,7 +85,8 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple
84} 85}
85 86
86struct nf_conntrack_expect * 87struct nf_conntrack_expect *
87__nf_ct_expect_find(struct net *net, const struct nf_conntrack_tuple *tuple) 88__nf_ct_expect_find(struct net *net, u16 zone,
89 const struct nf_conntrack_tuple *tuple)
88{ 90{
89 struct nf_conntrack_expect *i; 91 struct nf_conntrack_expect *i;
90 struct hlist_node *n; 92 struct hlist_node *n;
@@ -95,7 +97,8 @@ __nf_ct_expect_find(struct net *net, const struct nf_conntrack_tuple *tuple)
95 97
96 h = nf_ct_expect_dst_hash(tuple); 98 h = nf_ct_expect_dst_hash(tuple);
97 hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) { 99 hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
98 if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) 100 if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
101 nf_ct_zone(i->master) == zone)
99 return i; 102 return i;
100 } 103 }
101 return NULL; 104 return NULL;
@@ -104,12 +107,13 @@ EXPORT_SYMBOL_GPL(__nf_ct_expect_find);
104 107
105/* Just find a expectation corresponding to a tuple. */ 108/* Just find a expectation corresponding to a tuple. */
106struct nf_conntrack_expect * 109struct nf_conntrack_expect *
107nf_ct_expect_find_get(struct net *net, const struct nf_conntrack_tuple *tuple) 110nf_ct_expect_find_get(struct net *net, u16 zone,
111 const struct nf_conntrack_tuple *tuple)
108{ 112{
109 struct nf_conntrack_expect *i; 113 struct nf_conntrack_expect *i;
110 114
111 rcu_read_lock(); 115 rcu_read_lock();
112 i = __nf_ct_expect_find(net, tuple); 116 i = __nf_ct_expect_find(net, zone, tuple);
113 if (i && !atomic_inc_not_zero(&i->use)) 117 if (i && !atomic_inc_not_zero(&i->use))
114 i = NULL; 118 i = NULL;
115 rcu_read_unlock(); 119 rcu_read_unlock();
@@ -121,7 +125,8 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
121/* If an expectation for this connection is found, it gets delete from 125/* If an expectation for this connection is found, it gets delete from
122 * global list then returned. */ 126 * global list then returned. */
123struct nf_conntrack_expect * 127struct nf_conntrack_expect *
124nf_ct_find_expectation(struct net *net, const struct nf_conntrack_tuple *tuple) 128nf_ct_find_expectation(struct net *net, u16 zone,
129 const struct nf_conntrack_tuple *tuple)
125{ 130{
126 struct nf_conntrack_expect *i, *exp = NULL; 131 struct nf_conntrack_expect *i, *exp = NULL;
127 struct hlist_node *n; 132 struct hlist_node *n;
@@ -133,7 +138,8 @@ nf_ct_find_expectation(struct net *net, const struct nf_conntrack_tuple *tuple)
133 h = nf_ct_expect_dst_hash(tuple); 138 h = nf_ct_expect_dst_hash(tuple);
134 hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) { 139 hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
135 if (!(i->flags & NF_CT_EXPECT_INACTIVE) && 140 if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
136 nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) { 141 nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
142 nf_ct_zone(i->master) == zone) {
137 exp = i; 143 exp = i;
138 break; 144 break;
139 } 145 }
@@ -204,7 +210,8 @@ static inline int expect_matches(const struct nf_conntrack_expect *a,
204{ 210{
205 return a->master == b->master && a->class == b->class && 211 return a->master == b->master && a->class == b->class &&
206 nf_ct_tuple_equal(&a->tuple, &b->tuple) && 212 nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
207 nf_ct_tuple_mask_equal(&a->mask, &b->mask); 213 nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
214 nf_ct_zone(a->master) == nf_ct_zone(b->master);
208} 215}
209 216
210/* Generally a bad idea to call this: could have matched already. */ 217/* Generally a bad idea to call this: could have matched already. */
@@ -232,7 +239,6 @@ struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
232 239
233 new->master = me; 240 new->master = me;
234 atomic_set(&new->use, 1); 241 atomic_set(&new->use, 1);
235 INIT_RCU_HEAD(&new->rcu);
236 return new; 242 return new;
237} 243}
238EXPORT_SYMBOL_GPL(nf_ct_expect_alloc); 244EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
@@ -500,6 +506,7 @@ static void exp_seq_stop(struct seq_file *seq, void *v)
500static int exp_seq_show(struct seq_file *s, void *v) 506static int exp_seq_show(struct seq_file *s, void *v)
501{ 507{
502 struct nf_conntrack_expect *expect; 508 struct nf_conntrack_expect *expect;
509 struct nf_conntrack_helper *helper;
503 struct hlist_node *n = v; 510 struct hlist_node *n = v;
504 char *delim = ""; 511 char *delim = "";
505 512
@@ -525,6 +532,14 @@ static int exp_seq_show(struct seq_file *s, void *v)
525 if (expect->flags & NF_CT_EXPECT_INACTIVE) 532 if (expect->flags & NF_CT_EXPECT_INACTIVE)
526 seq_printf(s, "%sINACTIVE", delim); 533 seq_printf(s, "%sINACTIVE", delim);
527 534
535 helper = rcu_dereference(nfct_help(expect->master)->helper);
536 if (helper) {
537 seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
538 if (helper->expect_policy[expect->class].name)
539 seq_printf(s, "/%s",
540 helper->expect_policy[expect->class].name);
541 }
542
528 return seq_putc(s, '\n'); 543 return seq_putc(s, '\n');
529} 544}
530 545
@@ -569,7 +584,7 @@ static void exp_proc_remove(struct net *net)
569#endif /* CONFIG_PROC_FS */ 584#endif /* CONFIG_PROC_FS */
570} 585}
571 586
572module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600); 587module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
573 588
574int nf_conntrack_expect_init(struct net *net) 589int nf_conntrack_expect_init(struct net *net)
575{ 590{
@@ -577,7 +592,7 @@ int nf_conntrack_expect_init(struct net *net)
577 592
578 if (net_eq(net, &init_net)) { 593 if (net_eq(net, &init_net)) {
579 if (!nf_ct_expect_hsize) { 594 if (!nf_ct_expect_hsize) {
580 nf_ct_expect_hsize = nf_conntrack_htable_size / 256; 595 nf_ct_expect_hsize = net->ct.htable_size / 256;
581 if (!nf_ct_expect_hsize) 596 if (!nf_ct_expect_hsize)
582 nf_ct_expect_hsize = 1; 597 nf_ct_expect_hsize = 1;
583 } 598 }
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index fef95be334bd..fdc8fb4ae10f 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -59,7 +59,6 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
59 if (!*ext) 59 if (!*ext)
60 return NULL; 60 return NULL;
61 61
62 INIT_RCU_HEAD(&(*ext)->rcu);
63 (*ext)->offset[id] = off; 62 (*ext)->offset[id] = off;
64 (*ext)->len = len; 63 (*ext)->len = len;
65 64
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 38ea7ef3ccd2..f0732aa18e4f 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -323,24 +323,24 @@ static void update_nl_seq(struct nf_conn *ct, u32 nl_seq,
323 struct nf_ct_ftp_master *info, int dir, 323 struct nf_ct_ftp_master *info, int dir,
324 struct sk_buff *skb) 324 struct sk_buff *skb)
325{ 325{
326 unsigned int i, oldest = NUM_SEQ_TO_REMEMBER; 326 unsigned int i, oldest;
327 327
328 /* Look for oldest: if we find exact match, we're done. */ 328 /* Look for oldest: if we find exact match, we're done. */
329 for (i = 0; i < info->seq_aft_nl_num[dir]; i++) { 329 for (i = 0; i < info->seq_aft_nl_num[dir]; i++) {
330 if (info->seq_aft_nl[dir][i] == nl_seq) 330 if (info->seq_aft_nl[dir][i] == nl_seq)
331 return; 331 return;
332
333 if (oldest == info->seq_aft_nl_num[dir] ||
334 before(info->seq_aft_nl[dir][i],
335 info->seq_aft_nl[dir][oldest]))
336 oldest = i;
337 } 332 }
338 333
339 if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) { 334 if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) {
340 info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq; 335 info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq;
341 } else if (oldest != NUM_SEQ_TO_REMEMBER && 336 } else {
342 after(nl_seq, info->seq_aft_nl[dir][oldest])) { 337 if (before(info->seq_aft_nl[dir][0], info->seq_aft_nl[dir][1]))
343 info->seq_aft_nl[dir][oldest] = nl_seq; 338 oldest = 0;
339 else
340 oldest = 1;
341
342 if (after(nl_seq, info->seq_aft_nl[dir][oldest]))
343 info->seq_aft_nl[dir][oldest] = nl_seq;
344 } 344 }
345} 345}
346 346
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 66369490230e..a1c8dd917e12 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -29,6 +29,7 @@
29#include <net/netfilter/nf_conntrack_expect.h> 29#include <net/netfilter/nf_conntrack_expect.h>
30#include <net/netfilter/nf_conntrack_ecache.h> 30#include <net/netfilter/nf_conntrack_ecache.h>
31#include <net/netfilter/nf_conntrack_helper.h> 31#include <net/netfilter/nf_conntrack_helper.h>
32#include <net/netfilter/nf_conntrack_zones.h>
32#include <linux/netfilter/nf_conntrack_h323.h> 33#include <linux/netfilter/nf_conntrack_h323.h>
33 34
34/* Parameters */ 35/* Parameters */
@@ -1216,7 +1217,7 @@ static struct nf_conntrack_expect *find_expect(struct nf_conn *ct,
1216 tuple.dst.u.tcp.port = port; 1217 tuple.dst.u.tcp.port = port;
1217 tuple.dst.protonum = IPPROTO_TCP; 1218 tuple.dst.protonum = IPPROTO_TCP;
1218 1219
1219 exp = __nf_ct_expect_find(net, &tuple); 1220 exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple);
1220 if (exp && exp->master == ct) 1221 if (exp && exp->master == ct)
1221 return exp; 1222 return exp;
1222 return NULL; 1223 return NULL;
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 65c2a7bc3afc..4509fa6726f8 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -65,7 +65,7 @@ __nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
65} 65}
66 66
67struct nf_conntrack_helper * 67struct nf_conntrack_helper *
68__nf_conntrack_helper_find_byname(const char *name) 68__nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum)
69{ 69{
70 struct nf_conntrack_helper *h; 70 struct nf_conntrack_helper *h;
71 struct hlist_node *n; 71 struct hlist_node *n;
@@ -73,13 +73,34 @@ __nf_conntrack_helper_find_byname(const char *name)
73 73
74 for (i = 0; i < nf_ct_helper_hsize; i++) { 74 for (i = 0; i < nf_ct_helper_hsize; i++) {
75 hlist_for_each_entry_rcu(h, n, &nf_ct_helper_hash[i], hnode) { 75 hlist_for_each_entry_rcu(h, n, &nf_ct_helper_hash[i], hnode) {
76 if (!strcmp(h->name, name)) 76 if (!strcmp(h->name, name) &&
77 h->tuple.src.l3num == l3num &&
78 h->tuple.dst.protonum == protonum)
77 return h; 79 return h;
78 } 80 }
79 } 81 }
80 return NULL; 82 return NULL;
81} 83}
82EXPORT_SYMBOL_GPL(__nf_conntrack_helper_find_byname); 84EXPORT_SYMBOL_GPL(__nf_conntrack_helper_find);
85
86struct nf_conntrack_helper *
87nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
88{
89 struct nf_conntrack_helper *h;
90
91 h = __nf_conntrack_helper_find(name, l3num, protonum);
92#ifdef CONFIG_MODULES
93 if (h == NULL) {
94 if (request_module("nfct-helper-%s", name) == 0)
95 h = __nf_conntrack_helper_find(name, l3num, protonum);
96 }
97#endif
98 if (h != NULL && !try_module_get(h->me))
99 h = NULL;
100
101 return h;
102}
103EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);
83 104
84struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp) 105struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp)
85{ 106{
@@ -94,13 +115,22 @@ struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp)
94} 115}
95EXPORT_SYMBOL_GPL(nf_ct_helper_ext_add); 116EXPORT_SYMBOL_GPL(nf_ct_helper_ext_add);
96 117
97int __nf_ct_try_assign_helper(struct nf_conn *ct, gfp_t flags) 118int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
119 gfp_t flags)
98{ 120{
121 struct nf_conntrack_helper *helper = NULL;
122 struct nf_conn_help *help;
99 int ret = 0; 123 int ret = 0;
100 struct nf_conntrack_helper *helper;
101 struct nf_conn_help *help = nfct_help(ct);
102 124
103 helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 125 if (tmpl != NULL) {
126 help = nfct_help(tmpl);
127 if (help != NULL)
128 helper = help->helper;
129 }
130
131 help = nfct_help(ct);
132 if (helper == NULL)
133 helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
104 if (helper == NULL) { 134 if (helper == NULL) {
105 if (help) 135 if (help)
106 rcu_assign_pointer(help->helper, NULL); 136 rcu_assign_pointer(help->helper, NULL);
@@ -192,7 +222,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
192 /* Get rid of expecteds, set helpers to NULL. */ 222 /* Get rid of expecteds, set helpers to NULL. */
193 hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode) 223 hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode)
194 unhelp(h, me); 224 unhelp(h, me);
195 for (i = 0; i < nf_conntrack_htable_size; i++) { 225 for (i = 0; i < net->ct.htable_size; i++) {
196 hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) 226 hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
197 unhelp(h, me); 227 unhelp(h, me);
198 } 228 }
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 59d8064eb522..569410a85953 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -30,6 +30,7 @@
30 30
31#include <linux/netfilter.h> 31#include <linux/netfilter.h>
32#include <net/netlink.h> 32#include <net/netlink.h>
33#include <net/sock.h>
33#include <net/netfilter/nf_conntrack.h> 34#include <net/netfilter/nf_conntrack.h>
34#include <net/netfilter/nf_conntrack_core.h> 35#include <net/netfilter/nf_conntrack_core.h>
35#include <net/netfilter/nf_conntrack_expect.h> 36#include <net/netfilter/nf_conntrack_expect.h>
@@ -38,6 +39,7 @@
38#include <net/netfilter/nf_conntrack_l4proto.h> 39#include <net/netfilter/nf_conntrack_l4proto.h>
39#include <net/netfilter/nf_conntrack_tuple.h> 40#include <net/netfilter/nf_conntrack_tuple.h>
40#include <net/netfilter/nf_conntrack_acct.h> 41#include <net/netfilter/nf_conntrack_acct.h>
42#include <net/netfilter/nf_conntrack_zones.h>
41#ifdef CONFIG_NF_NAT_NEEDED 43#ifdef CONFIG_NF_NAT_NEEDED
42#include <net/netfilter/nf_nat_core.h> 44#include <net/netfilter/nf_nat_core.h>
43#include <net/netfilter/nf_nat_protocol.h> 45#include <net/netfilter/nf_nat_protocol.h>
@@ -378,6 +380,9 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
378 goto nla_put_failure; 380 goto nla_put_failure;
379 nla_nest_end(skb, nest_parms); 381 nla_nest_end(skb, nest_parms);
380 382
383 if (nf_ct_zone(ct))
384 NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));
385
381 if (ctnetlink_dump_status(skb, ct) < 0 || 386 if (ctnetlink_dump_status(skb, ct) < 0 ||
382 ctnetlink_dump_timeout(skb, ct) < 0 || 387 ctnetlink_dump_timeout(skb, ct) < 0 ||
383 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || 388 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
@@ -456,6 +461,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
456static int 461static int
457ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) 462ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
458{ 463{
464 struct net *net;
459 struct nlmsghdr *nlh; 465 struct nlmsghdr *nlh;
460 struct nfgenmsg *nfmsg; 466 struct nfgenmsg *nfmsg;
461 struct nlattr *nest_parms; 467 struct nlattr *nest_parms;
@@ -482,7 +488,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
482 } else 488 } else
483 return 0; 489 return 0;
484 490
485 if (!item->report && !nfnetlink_has_listeners(group)) 491 net = nf_ct_net(ct);
492 if (!item->report && !nfnetlink_has_listeners(net, group))
486 return 0; 493 return 0;
487 494
488 skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC); 495 skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
@@ -514,6 +521,9 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
514 goto nla_put_failure; 521 goto nla_put_failure;
515 nla_nest_end(skb, nest_parms); 522 nla_nest_end(skb, nest_parms);
516 523
524 if (nf_ct_zone(ct))
525 NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));
526
517 if (ctnetlink_dump_id(skb, ct) < 0) 527 if (ctnetlink_dump_id(skb, ct) < 0)
518 goto nla_put_failure; 528 goto nla_put_failure;
519 529
@@ -559,7 +569,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
559 rcu_read_unlock(); 569 rcu_read_unlock();
560 570
561 nlmsg_end(skb, nlh); 571 nlmsg_end(skb, nlh);
562 err = nfnetlink_send(skb, item->pid, group, item->report, GFP_ATOMIC); 572 err = nfnetlink_send(skb, net, item->pid, group, item->report,
573 GFP_ATOMIC);
563 if (err == -ENOBUFS || err == -EAGAIN) 574 if (err == -ENOBUFS || err == -EAGAIN)
564 return -ENOBUFS; 575 return -ENOBUFS;
565 576
@@ -571,7 +582,9 @@ nla_put_failure:
571nlmsg_failure: 582nlmsg_failure:
572 kfree_skb(skb); 583 kfree_skb(skb);
573errout: 584errout:
574 nfnetlink_set_err(0, group, -ENOBUFS); 585 if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
586 return -ENOBUFS;
587
575 return 0; 588 return 0;
576} 589}
577#endif /* CONFIG_NF_CONNTRACK_EVENTS */ 590#endif /* CONFIG_NF_CONNTRACK_EVENTS */
@@ -586,6 +599,7 @@ static int ctnetlink_done(struct netlink_callback *cb)
586static int 599static int
587ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) 600ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
588{ 601{
602 struct net *net = sock_net(skb->sk);
589 struct nf_conn *ct, *last; 603 struct nf_conn *ct, *last;
590 struct nf_conntrack_tuple_hash *h; 604 struct nf_conntrack_tuple_hash *h;
591 struct hlist_nulls_node *n; 605 struct hlist_nulls_node *n;
@@ -594,9 +608,9 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
594 608
595 rcu_read_lock(); 609 rcu_read_lock();
596 last = (struct nf_conn *)cb->args[1]; 610 last = (struct nf_conn *)cb->args[1];
597 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { 611 for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
598restart: 612restart:
599 hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]], 613 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
600 hnnode) { 614 hnnode) {
601 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) 615 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
602 continue; 616 continue;
@@ -703,6 +717,11 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
703 return ret; 717 return ret;
704} 718}
705 719
720static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
721 [CTA_TUPLE_IP] = { .type = NLA_NESTED },
722 [CTA_TUPLE_PROTO] = { .type = NLA_NESTED },
723};
724
706static int 725static int
707ctnetlink_parse_tuple(const struct nlattr * const cda[], 726ctnetlink_parse_tuple(const struct nlattr * const cda[],
708 struct nf_conntrack_tuple *tuple, 727 struct nf_conntrack_tuple *tuple,
@@ -713,7 +732,7 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
713 732
714 memset(tuple, 0, sizeof(*tuple)); 733 memset(tuple, 0, sizeof(*tuple));
715 734
716 nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], NULL); 735 nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy);
717 736
718 if (!tb[CTA_TUPLE_IP]) 737 if (!tb[CTA_TUPLE_IP])
719 return -EINVAL; 738 return -EINVAL;
@@ -740,12 +759,31 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
740 return 0; 759 return 0;
741} 760}
742 761
762static int
763ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone)
764{
765 if (attr)
766#ifdef CONFIG_NF_CONNTRACK_ZONES
767 *zone = ntohs(nla_get_be16(attr));
768#else
769 return -EOPNOTSUPP;
770#endif
771 else
772 *zone = 0;
773
774 return 0;
775}
776
777static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
778 [CTA_HELP_NAME] = { .type = NLA_NUL_STRING },
779};
780
743static inline int 781static inline int
744ctnetlink_parse_help(const struct nlattr *attr, char **helper_name) 782ctnetlink_parse_help(const struct nlattr *attr, char **helper_name)
745{ 783{
746 struct nlattr *tb[CTA_HELP_MAX+1]; 784 struct nlattr *tb[CTA_HELP_MAX+1];
747 785
748 nla_parse_nested(tb, CTA_HELP_MAX, attr, NULL); 786 nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy);
749 787
750 if (!tb[CTA_HELP_NAME]) 788 if (!tb[CTA_HELP_NAME])
751 return -EINVAL; 789 return -EINVAL;
@@ -756,11 +794,18 @@ ctnetlink_parse_help(const struct nlattr *attr, char **helper_name)
756} 794}
757 795
758static const struct nla_policy ct_nla_policy[CTA_MAX+1] = { 796static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
797 [CTA_TUPLE_ORIG] = { .type = NLA_NESTED },
798 [CTA_TUPLE_REPLY] = { .type = NLA_NESTED },
759 [CTA_STATUS] = { .type = NLA_U32 }, 799 [CTA_STATUS] = { .type = NLA_U32 },
800 [CTA_PROTOINFO] = { .type = NLA_NESTED },
801 [CTA_HELP] = { .type = NLA_NESTED },
802 [CTA_NAT_SRC] = { .type = NLA_NESTED },
760 [CTA_TIMEOUT] = { .type = NLA_U32 }, 803 [CTA_TIMEOUT] = { .type = NLA_U32 },
761 [CTA_MARK] = { .type = NLA_U32 }, 804 [CTA_MARK] = { .type = NLA_U32 },
762 [CTA_USE] = { .type = NLA_U32 },
763 [CTA_ID] = { .type = NLA_U32 }, 805 [CTA_ID] = { .type = NLA_U32 },
806 [CTA_NAT_DST] = { .type = NLA_NESTED },
807 [CTA_TUPLE_MASTER] = { .type = NLA_NESTED },
808 [CTA_ZONE] = { .type = NLA_U16 },
764}; 809};
765 810
766static int 811static int
@@ -768,12 +813,18 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
768 const struct nlmsghdr *nlh, 813 const struct nlmsghdr *nlh,
769 const struct nlattr * const cda[]) 814 const struct nlattr * const cda[])
770{ 815{
816 struct net *net = sock_net(ctnl);
771 struct nf_conntrack_tuple_hash *h; 817 struct nf_conntrack_tuple_hash *h;
772 struct nf_conntrack_tuple tuple; 818 struct nf_conntrack_tuple tuple;
773 struct nf_conn *ct; 819 struct nf_conn *ct;
774 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 820 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
775 u_int8_t u3 = nfmsg->nfgen_family; 821 u_int8_t u3 = nfmsg->nfgen_family;
776 int err = 0; 822 u16 zone;
823 int err;
824
825 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
826 if (err < 0)
827 return err;
777 828
778 if (cda[CTA_TUPLE_ORIG]) 829 if (cda[CTA_TUPLE_ORIG])
779 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3); 830 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
@@ -781,7 +832,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
781 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3); 832 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
782 else { 833 else {
783 /* Flush the whole table */ 834 /* Flush the whole table */
784 nf_conntrack_flush_report(&init_net, 835 nf_conntrack_flush_report(net,
785 NETLINK_CB(skb).pid, 836 NETLINK_CB(skb).pid,
786 nlmsg_report(nlh)); 837 nlmsg_report(nlh));
787 return 0; 838 return 0;
@@ -790,7 +841,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
790 if (err < 0) 841 if (err < 0)
791 return err; 842 return err;
792 843
793 h = nf_conntrack_find_get(&init_net, &tuple); 844 h = nf_conntrack_find_get(net, zone, &tuple);
794 if (!h) 845 if (!h)
795 return -ENOENT; 846 return -ENOENT;
796 847
@@ -828,18 +879,24 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
828 const struct nlmsghdr *nlh, 879 const struct nlmsghdr *nlh,
829 const struct nlattr * const cda[]) 880 const struct nlattr * const cda[])
830{ 881{
882 struct net *net = sock_net(ctnl);
831 struct nf_conntrack_tuple_hash *h; 883 struct nf_conntrack_tuple_hash *h;
832 struct nf_conntrack_tuple tuple; 884 struct nf_conntrack_tuple tuple;
833 struct nf_conn *ct; 885 struct nf_conn *ct;
834 struct sk_buff *skb2 = NULL; 886 struct sk_buff *skb2 = NULL;
835 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 887 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
836 u_int8_t u3 = nfmsg->nfgen_family; 888 u_int8_t u3 = nfmsg->nfgen_family;
837 int err = 0; 889 u16 zone;
890 int err;
838 891
839 if (nlh->nlmsg_flags & NLM_F_DUMP) 892 if (nlh->nlmsg_flags & NLM_F_DUMP)
840 return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table, 893 return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
841 ctnetlink_done); 894 ctnetlink_done);
842 895
896 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
897 if (err < 0)
898 return err;
899
843 if (cda[CTA_TUPLE_ORIG]) 900 if (cda[CTA_TUPLE_ORIG])
844 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3); 901 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
845 else if (cda[CTA_TUPLE_REPLY]) 902 else if (cda[CTA_TUPLE_REPLY])
@@ -850,7 +907,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
850 if (err < 0) 907 if (err < 0)
851 return err; 908 return err;
852 909
853 h = nf_conntrack_find_get(&init_net, &tuple); 910 h = nf_conntrack_find_get(net, zone, &tuple);
854 if (!h) 911 if (!h)
855 return -ENOENT; 912 return -ENOENT;
856 913
@@ -994,7 +1051,8 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
994 return 0; 1051 return 0;
995 } 1052 }
996 1053
997 helper = __nf_conntrack_helper_find_byname(helpname); 1054 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1055 nf_ct_protonum(ct));
998 if (helper == NULL) { 1056 if (helper == NULL) {
999#ifdef CONFIG_MODULES 1057#ifdef CONFIG_MODULES
1000 spin_unlock_bh(&nf_conntrack_lock); 1058 spin_unlock_bh(&nf_conntrack_lock);
@@ -1005,7 +1063,8 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
1005 } 1063 }
1006 1064
1007 spin_lock_bh(&nf_conntrack_lock); 1065 spin_lock_bh(&nf_conntrack_lock);
1008 helper = __nf_conntrack_helper_find_byname(helpname); 1066 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1067 nf_ct_protonum(ct));
1009 if (helper) 1068 if (helper)
1010 return -EAGAIN; 1069 return -EAGAIN;
1011#endif 1070#endif
@@ -1020,9 +1079,8 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
1020 /* need to zero data of old helper */ 1079 /* need to zero data of old helper */
1021 memset(&help->help, 0, sizeof(help->help)); 1080 memset(&help->help, 0, sizeof(help->help));
1022 } else { 1081 } else {
1023 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); 1082 /* we cannot set a helper for an existing conntrack */
1024 if (help == NULL) 1083 return -EOPNOTSUPP;
1025 return -ENOMEM;
1026 } 1084 }
1027 1085
1028 rcu_assign_pointer(help->helper, helper); 1086 rcu_assign_pointer(help->helper, helper);
@@ -1044,6 +1102,12 @@ ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[])
1044 return 0; 1102 return 0;
1045} 1103}
1046 1104
1105static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
1106 [CTA_PROTOINFO_TCP] = { .type = NLA_NESTED },
1107 [CTA_PROTOINFO_DCCP] = { .type = NLA_NESTED },
1108 [CTA_PROTOINFO_SCTP] = { .type = NLA_NESTED },
1109};
1110
1047static inline int 1111static inline int
1048ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[]) 1112ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[])
1049{ 1113{
@@ -1052,7 +1116,7 @@ ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[]
1052 struct nf_conntrack_l4proto *l4proto; 1116 struct nf_conntrack_l4proto *l4proto;
1053 int err = 0; 1117 int err = 0;
1054 1118
1055 nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, NULL); 1119 nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy);
1056 1120
1057 rcu_read_lock(); 1121 rcu_read_lock();
1058 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); 1122 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
@@ -1064,12 +1128,18 @@ ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[]
1064} 1128}
1065 1129
1066#ifdef CONFIG_NF_NAT_NEEDED 1130#ifdef CONFIG_NF_NAT_NEEDED
1131static const struct nla_policy nat_seq_policy[CTA_NAT_SEQ_MAX+1] = {
1132 [CTA_NAT_SEQ_CORRECTION_POS] = { .type = NLA_U32 },
1133 [CTA_NAT_SEQ_OFFSET_BEFORE] = { .type = NLA_U32 },
1134 [CTA_NAT_SEQ_OFFSET_AFTER] = { .type = NLA_U32 },
1135};
1136
1067static inline int 1137static inline int
1068change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr) 1138change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr)
1069{ 1139{
1070 struct nlattr *cda[CTA_NAT_SEQ_MAX+1]; 1140 struct nlattr *cda[CTA_NAT_SEQ_MAX+1];
1071 1141
1072 nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, NULL); 1142 nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, nat_seq_policy);
1073 1143
1074 if (!cda[CTA_NAT_SEQ_CORRECTION_POS]) 1144 if (!cda[CTA_NAT_SEQ_CORRECTION_POS])
1075 return -EINVAL; 1145 return -EINVAL;
@@ -1175,7 +1245,8 @@ ctnetlink_change_conntrack(struct nf_conn *ct,
1175} 1245}
1176 1246
1177static struct nf_conn * 1247static struct nf_conn *
1178ctnetlink_create_conntrack(const struct nlattr * const cda[], 1248ctnetlink_create_conntrack(struct net *net, u16 zone,
1249 const struct nlattr * const cda[],
1179 struct nf_conntrack_tuple *otuple, 1250 struct nf_conntrack_tuple *otuple,
1180 struct nf_conntrack_tuple *rtuple, 1251 struct nf_conntrack_tuple *rtuple,
1181 u8 u3) 1252 u8 u3)
@@ -1184,7 +1255,7 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
1184 int err = -EINVAL; 1255 int err = -EINVAL;
1185 struct nf_conntrack_helper *helper; 1256 struct nf_conntrack_helper *helper;
1186 1257
1187 ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_ATOMIC); 1258 ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
1188 if (IS_ERR(ct)) 1259 if (IS_ERR(ct))
1189 return ERR_PTR(-ENOMEM); 1260 return ERR_PTR(-ENOMEM);
1190 1261
@@ -1193,7 +1264,6 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
1193 ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT])); 1264 ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
1194 1265
1195 ct->timeout.expires = jiffies + ct->timeout.expires * HZ; 1266 ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
1196 ct->status |= IPS_CONFIRMED;
1197 1267
1198 rcu_read_lock(); 1268 rcu_read_lock();
1199 if (cda[CTA_HELP]) { 1269 if (cda[CTA_HELP]) {
@@ -1203,7 +1273,8 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
1203 if (err < 0) 1273 if (err < 0)
1204 goto err2; 1274 goto err2;
1205 1275
1206 helper = __nf_conntrack_helper_find_byname(helpname); 1276 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1277 nf_ct_protonum(ct));
1207 if (helper == NULL) { 1278 if (helper == NULL) {
1208 rcu_read_unlock(); 1279 rcu_read_unlock();
1209#ifdef CONFIG_MODULES 1280#ifdef CONFIG_MODULES
@@ -1213,7 +1284,9 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
1213 } 1284 }
1214 1285
1215 rcu_read_lock(); 1286 rcu_read_lock();
1216 helper = __nf_conntrack_helper_find_byname(helpname); 1287 helper = __nf_conntrack_helper_find(helpname,
1288 nf_ct_l3num(ct),
1289 nf_ct_protonum(ct));
1217 if (helper) { 1290 if (helper) {
1218 err = -EAGAIN; 1291 err = -EAGAIN;
1219 goto err2; 1292 goto err2;
@@ -1236,19 +1309,24 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
1236 } 1309 }
1237 } else { 1310 } else {
1238 /* try an implicit helper assignation */ 1311 /* try an implicit helper assignation */
1239 err = __nf_ct_try_assign_helper(ct, GFP_ATOMIC); 1312 err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1240 if (err < 0) 1313 if (err < 0)
1241 goto err2; 1314 goto err2;
1242 } 1315 }
1243 1316
1244 if (cda[CTA_STATUS]) { 1317 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
1245 err = ctnetlink_change_status(ct, cda); 1318 err = ctnetlink_change_nat(ct, cda);
1246 if (err < 0) 1319 if (err < 0)
1247 goto err2; 1320 goto err2;
1248 } 1321 }
1249 1322
1250 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { 1323 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1251 err = ctnetlink_change_nat(ct, cda); 1324 nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
1325 /* we must add conntrack extensions before confirmation. */
1326 ct->status |= IPS_CONFIRMED;
1327
1328 if (cda[CTA_STATUS]) {
1329 err = ctnetlink_change_status(ct, cda);
1252 if (err < 0) 1330 if (err < 0)
1253 goto err2; 1331 goto err2;
1254 } 1332 }
@@ -1267,9 +1345,6 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
1267 goto err2; 1345 goto err2;
1268 } 1346 }
1269 1347
1270 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1271 nf_ct_ecache_ext_add(ct, GFP_ATOMIC);
1272
1273#if defined(CONFIG_NF_CONNTRACK_MARK) 1348#if defined(CONFIG_NF_CONNTRACK_MARK)
1274 if (cda[CTA_MARK]) 1349 if (cda[CTA_MARK])
1275 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK])); 1350 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
@@ -1285,7 +1360,7 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
1285 if (err < 0) 1360 if (err < 0)
1286 goto err2; 1361 goto err2;
1287 1362
1288 master_h = nf_conntrack_find_get(&init_net, &master); 1363 master_h = nf_conntrack_find_get(net, zone, &master);
1289 if (master_h == NULL) { 1364 if (master_h == NULL) {
1290 err = -ENOENT; 1365 err = -ENOENT;
1291 goto err2; 1366 goto err2;
@@ -1313,11 +1388,17 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1313 const struct nlmsghdr *nlh, 1388 const struct nlmsghdr *nlh,
1314 const struct nlattr * const cda[]) 1389 const struct nlattr * const cda[])
1315{ 1390{
1391 struct net *net = sock_net(ctnl);
1316 struct nf_conntrack_tuple otuple, rtuple; 1392 struct nf_conntrack_tuple otuple, rtuple;
1317 struct nf_conntrack_tuple_hash *h = NULL; 1393 struct nf_conntrack_tuple_hash *h = NULL;
1318 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1394 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1319 u_int8_t u3 = nfmsg->nfgen_family; 1395 u_int8_t u3 = nfmsg->nfgen_family;
1320 int err = 0; 1396 u16 zone;
1397 int err;
1398
1399 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1400 if (err < 0)
1401 return err;
1321 1402
1322 if (cda[CTA_TUPLE_ORIG]) { 1403 if (cda[CTA_TUPLE_ORIG]) {
1323 err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3); 1404 err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
@@ -1333,9 +1414,9 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1333 1414
1334 spin_lock_bh(&nf_conntrack_lock); 1415 spin_lock_bh(&nf_conntrack_lock);
1335 if (cda[CTA_TUPLE_ORIG]) 1416 if (cda[CTA_TUPLE_ORIG])
1336 h = __nf_conntrack_find(&init_net, &otuple); 1417 h = __nf_conntrack_find(net, zone, &otuple);
1337 else if (cda[CTA_TUPLE_REPLY]) 1418 else if (cda[CTA_TUPLE_REPLY])
1338 h = __nf_conntrack_find(&init_net, &rtuple); 1419 h = __nf_conntrack_find(net, zone, &rtuple);
1339 1420
1340 if (h == NULL) { 1421 if (h == NULL) {
1341 err = -ENOENT; 1422 err = -ENOENT;
@@ -1343,7 +1424,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1343 struct nf_conn *ct; 1424 struct nf_conn *ct;
1344 enum ip_conntrack_events events; 1425 enum ip_conntrack_events events;
1345 1426
1346 ct = ctnetlink_create_conntrack(cda, &otuple, 1427 ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
1347 &rtuple, u3); 1428 &rtuple, u3);
1348 if (IS_ERR(ct)) { 1429 if (IS_ERR(ct)) {
1349 err = PTR_ERR(ct); 1430 err = PTR_ERR(ct);
@@ -1357,7 +1438,8 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1357 else 1438 else
1358 events = IPCT_NEW; 1439 events = IPCT_NEW;
1359 1440
1360 nf_conntrack_eventmask_report((1 << IPCT_STATUS) | 1441 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1442 (1 << IPCT_ASSURED) |
1361 (1 << IPCT_HELPER) | 1443 (1 << IPCT_HELPER) |
1362 (1 << IPCT_PROTOINFO) | 1444 (1 << IPCT_PROTOINFO) |
1363 (1 << IPCT_NATSEQADJ) | 1445 (1 << IPCT_NATSEQADJ) |
@@ -1382,7 +1464,8 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1382 if (err == 0) { 1464 if (err == 0) {
1383 nf_conntrack_get(&ct->ct_general); 1465 nf_conntrack_get(&ct->ct_general);
1384 spin_unlock_bh(&nf_conntrack_lock); 1466 spin_unlock_bh(&nf_conntrack_lock);
1385 nf_conntrack_eventmask_report((1 << IPCT_STATUS) | 1467 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1468 (1 << IPCT_ASSURED) |
1386 (1 << IPCT_HELPER) | 1469 (1 << IPCT_HELPER) |
1387 (1 << IPCT_PROTOINFO) | 1470 (1 << IPCT_PROTOINFO) |
1388 (1 << IPCT_NATSEQADJ) | 1471 (1 << IPCT_NATSEQADJ) |
@@ -1437,8 +1520,9 @@ ctnetlink_exp_dump_mask(struct sk_buff *skb,
1437 struct nlattr *nest_parms; 1520 struct nlattr *nest_parms;
1438 1521
1439 memset(&m, 0xFF, sizeof(m)); 1522 memset(&m, 0xFF, sizeof(m));
1440 m.src.u.all = mask->src.u.all;
1441 memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3)); 1523 memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
1524 m.src.u.all = mask->src.u.all;
1525 m.dst.protonum = tuple->dst.protonum;
1442 1526
1443 nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK | NLA_F_NESTED); 1527 nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK | NLA_F_NESTED);
1444 if (!nest_parms) 1528 if (!nest_parms)
@@ -1468,6 +1552,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
1468 const struct nf_conntrack_expect *exp) 1552 const struct nf_conntrack_expect *exp)
1469{ 1553{
1470 struct nf_conn *master = exp->master; 1554 struct nf_conn *master = exp->master;
1555 struct nf_conntrack_helper *helper;
1471 long timeout = (exp->timeout.expires - jiffies) / HZ; 1556 long timeout = (exp->timeout.expires - jiffies) / HZ;
1472 1557
1473 if (timeout < 0) 1558 if (timeout < 0)
@@ -1484,6 +1569,9 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
1484 1569
1485 NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)); 1570 NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout));
1486 NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)); 1571 NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp));
1572 helper = rcu_dereference(nfct_help(master)->helper);
1573 if (helper)
1574 NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name);
1487 1575
1488 return 0; 1576 return 0;
1489 1577
@@ -1525,9 +1613,10 @@ nla_put_failure:
1525static int 1613static int
1526ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item) 1614ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
1527{ 1615{
1616 struct nf_conntrack_expect *exp = item->exp;
1617 struct net *net = nf_ct_exp_net(exp);
1528 struct nlmsghdr *nlh; 1618 struct nlmsghdr *nlh;
1529 struct nfgenmsg *nfmsg; 1619 struct nfgenmsg *nfmsg;
1530 struct nf_conntrack_expect *exp = item->exp;
1531 struct sk_buff *skb; 1620 struct sk_buff *skb;
1532 unsigned int type; 1621 unsigned int type;
1533 int flags = 0; 1622 int flags = 0;
@@ -1539,7 +1628,7 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
1539 return 0; 1628 return 0;
1540 1629
1541 if (!item->report && 1630 if (!item->report &&
1542 !nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW)) 1631 !nfnetlink_has_listeners(net, NFNLGRP_CONNTRACK_EXP_NEW))
1543 return 0; 1632 return 0;
1544 1633
1545 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); 1634 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
@@ -1562,7 +1651,7 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
1562 rcu_read_unlock(); 1651 rcu_read_unlock();
1563 1652
1564 nlmsg_end(skb, nlh); 1653 nlmsg_end(skb, nlh);
1565 nfnetlink_send(skb, item->pid, NFNLGRP_CONNTRACK_EXP_NEW, 1654 nfnetlink_send(skb, net, item->pid, NFNLGRP_CONNTRACK_EXP_NEW,
1566 item->report, GFP_ATOMIC); 1655 item->report, GFP_ATOMIC);
1567 return 0; 1656 return 0;
1568 1657
@@ -1572,7 +1661,7 @@ nla_put_failure:
1572nlmsg_failure: 1661nlmsg_failure:
1573 kfree_skb(skb); 1662 kfree_skb(skb);
1574errout: 1663errout:
1575 nfnetlink_set_err(0, 0, -ENOBUFS); 1664 nfnetlink_set_err(net, 0, 0, -ENOBUFS);
1576 return 0; 1665 return 0;
1577} 1666}
1578#endif 1667#endif
@@ -1586,7 +1675,7 @@ static int ctnetlink_exp_done(struct netlink_callback *cb)
1586static int 1675static int
1587ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb) 1676ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
1588{ 1677{
1589 struct net *net = &init_net; 1678 struct net *net = sock_net(skb->sk);
1590 struct nf_conntrack_expect *exp, *last; 1679 struct nf_conntrack_expect *exp, *last;
1591 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 1680 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
1592 struct hlist_node *n; 1681 struct hlist_node *n;
@@ -1630,8 +1719,12 @@ out:
1630} 1719}
1631 1720
1632static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = { 1721static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
1722 [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
1723 [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
1724 [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
1633 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 }, 1725 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
1634 [CTA_EXPECT_ID] = { .type = NLA_U32 }, 1726 [CTA_EXPECT_ID] = { .type = NLA_U32 },
1727 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING },
1635}; 1728};
1636 1729
1637static int 1730static int
@@ -1639,12 +1732,14 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1639 const struct nlmsghdr *nlh, 1732 const struct nlmsghdr *nlh,
1640 const struct nlattr * const cda[]) 1733 const struct nlattr * const cda[])
1641{ 1734{
1735 struct net *net = sock_net(ctnl);
1642 struct nf_conntrack_tuple tuple; 1736 struct nf_conntrack_tuple tuple;
1643 struct nf_conntrack_expect *exp; 1737 struct nf_conntrack_expect *exp;
1644 struct sk_buff *skb2; 1738 struct sk_buff *skb2;
1645 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1739 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1646 u_int8_t u3 = nfmsg->nfgen_family; 1740 u_int8_t u3 = nfmsg->nfgen_family;
1647 int err = 0; 1741 u16 zone;
1742 int err;
1648 1743
1649 if (nlh->nlmsg_flags & NLM_F_DUMP) { 1744 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1650 return netlink_dump_start(ctnl, skb, nlh, 1745 return netlink_dump_start(ctnl, skb, nlh,
@@ -1652,6 +1747,10 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1652 ctnetlink_exp_done); 1747 ctnetlink_exp_done);
1653 } 1748 }
1654 1749
1750 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
1751 if (err < 0)
1752 return err;
1753
1655 if (cda[CTA_EXPECT_MASTER]) 1754 if (cda[CTA_EXPECT_MASTER])
1656 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3); 1755 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
1657 else 1756 else
@@ -1660,7 +1759,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1660 if (err < 0) 1759 if (err < 0)
1661 return err; 1760 return err;
1662 1761
1663 exp = nf_ct_expect_find_get(&init_net, &tuple); 1762 exp = nf_ct_expect_find_get(net, zone, &tuple);
1664 if (!exp) 1763 if (!exp)
1665 return -ENOENT; 1764 return -ENOENT;
1666 1765
@@ -1700,23 +1799,28 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1700 const struct nlmsghdr *nlh, 1799 const struct nlmsghdr *nlh,
1701 const struct nlattr * const cda[]) 1800 const struct nlattr * const cda[])
1702{ 1801{
1802 struct net *net = sock_net(ctnl);
1703 struct nf_conntrack_expect *exp; 1803 struct nf_conntrack_expect *exp;
1704 struct nf_conntrack_tuple tuple; 1804 struct nf_conntrack_tuple tuple;
1705 struct nf_conntrack_helper *h;
1706 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1805 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1707 struct hlist_node *n, *next; 1806 struct hlist_node *n, *next;
1708 u_int8_t u3 = nfmsg->nfgen_family; 1807 u_int8_t u3 = nfmsg->nfgen_family;
1709 unsigned int i; 1808 unsigned int i;
1809 u16 zone;
1710 int err; 1810 int err;
1711 1811
1712 if (cda[CTA_EXPECT_TUPLE]) { 1812 if (cda[CTA_EXPECT_TUPLE]) {
1713 /* delete a single expect by tuple */ 1813 /* delete a single expect by tuple */
1814 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
1815 if (err < 0)
1816 return err;
1817
1714 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); 1818 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
1715 if (err < 0) 1819 if (err < 0)
1716 return err; 1820 return err;
1717 1821
1718 /* bump usage count to 2 */ 1822 /* bump usage count to 2 */
1719 exp = nf_ct_expect_find_get(&init_net, &tuple); 1823 exp = nf_ct_expect_find_get(net, zone, &tuple);
1720 if (!exp) 1824 if (!exp)
1721 return -ENOENT; 1825 return -ENOENT;
1722 1826
@@ -1739,18 +1843,13 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1739 1843
1740 /* delete all expectations for this helper */ 1844 /* delete all expectations for this helper */
1741 spin_lock_bh(&nf_conntrack_lock); 1845 spin_lock_bh(&nf_conntrack_lock);
1742 h = __nf_conntrack_helper_find_byname(name);
1743 if (!h) {
1744 spin_unlock_bh(&nf_conntrack_lock);
1745 return -EOPNOTSUPP;
1746 }
1747 for (i = 0; i < nf_ct_expect_hsize; i++) { 1846 for (i = 0; i < nf_ct_expect_hsize; i++) {
1748 hlist_for_each_entry_safe(exp, n, next, 1847 hlist_for_each_entry_safe(exp, n, next,
1749 &init_net.ct.expect_hash[i], 1848 &net->ct.expect_hash[i],
1750 hnode) { 1849 hnode) {
1751 m_help = nfct_help(exp->master); 1850 m_help = nfct_help(exp->master);
1752 if (m_help->helper == h 1851 if (!strcmp(m_help->helper->name, name) &&
1753 && del_timer(&exp->timeout)) { 1852 del_timer(&exp->timeout)) {
1754 nf_ct_unlink_expect(exp); 1853 nf_ct_unlink_expect(exp);
1755 nf_ct_expect_put(exp); 1854 nf_ct_expect_put(exp);
1756 } 1855 }
@@ -1762,7 +1861,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1762 spin_lock_bh(&nf_conntrack_lock); 1861 spin_lock_bh(&nf_conntrack_lock);
1763 for (i = 0; i < nf_ct_expect_hsize; i++) { 1862 for (i = 0; i < nf_ct_expect_hsize; i++) {
1764 hlist_for_each_entry_safe(exp, n, next, 1863 hlist_for_each_entry_safe(exp, n, next,
1765 &init_net.ct.expect_hash[i], 1864 &net->ct.expect_hash[i],
1766 hnode) { 1865 hnode) {
1767 if (del_timer(&exp->timeout)) { 1866 if (del_timer(&exp->timeout)) {
1768 nf_ct_unlink_expect(exp); 1867 nf_ct_unlink_expect(exp);
@@ -1783,7 +1882,9 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
1783} 1882}
1784 1883
1785static int 1884static int
1786ctnetlink_create_expect(const struct nlattr * const cda[], u_int8_t u3, 1885ctnetlink_create_expect(struct net *net, u16 zone,
1886 const struct nlattr * const cda[],
1887 u_int8_t u3,
1787 u32 pid, int report) 1888 u32 pid, int report)
1788{ 1889{
1789 struct nf_conntrack_tuple tuple, mask, master_tuple; 1890 struct nf_conntrack_tuple tuple, mask, master_tuple;
@@ -1805,7 +1906,7 @@ ctnetlink_create_expect(const struct nlattr * const cda[], u_int8_t u3,
1805 return err; 1906 return err;
1806 1907
1807 /* Look for master conntrack of this expectation */ 1908 /* Look for master conntrack of this expectation */
1808 h = nf_conntrack_find_get(&init_net, &master_tuple); 1909 h = nf_conntrack_find_get(net, zone, &master_tuple);
1809 if (!h) 1910 if (!h)
1810 return -ENOENT; 1911 return -ENOENT;
1811 ct = nf_ct_tuplehash_to_ctrack(h); 1912 ct = nf_ct_tuplehash_to_ctrack(h);
@@ -1845,29 +1946,35 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
1845 const struct nlmsghdr *nlh, 1946 const struct nlmsghdr *nlh,
1846 const struct nlattr * const cda[]) 1947 const struct nlattr * const cda[])
1847{ 1948{
1949 struct net *net = sock_net(ctnl);
1848 struct nf_conntrack_tuple tuple; 1950 struct nf_conntrack_tuple tuple;
1849 struct nf_conntrack_expect *exp; 1951 struct nf_conntrack_expect *exp;
1850 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1952 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1851 u_int8_t u3 = nfmsg->nfgen_family; 1953 u_int8_t u3 = nfmsg->nfgen_family;
1852 int err = 0; 1954 u16 zone;
1955 int err;
1853 1956
1854 if (!cda[CTA_EXPECT_TUPLE] 1957 if (!cda[CTA_EXPECT_TUPLE]
1855 || !cda[CTA_EXPECT_MASK] 1958 || !cda[CTA_EXPECT_MASK]
1856 || !cda[CTA_EXPECT_MASTER]) 1959 || !cda[CTA_EXPECT_MASTER])
1857 return -EINVAL; 1960 return -EINVAL;
1858 1961
1962 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
1963 if (err < 0)
1964 return err;
1965
1859 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); 1966 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
1860 if (err < 0) 1967 if (err < 0)
1861 return err; 1968 return err;
1862 1969
1863 spin_lock_bh(&nf_conntrack_lock); 1970 spin_lock_bh(&nf_conntrack_lock);
1864 exp = __nf_ct_expect_find(&init_net, &tuple); 1971 exp = __nf_ct_expect_find(net, zone, &tuple);
1865 1972
1866 if (!exp) { 1973 if (!exp) {
1867 spin_unlock_bh(&nf_conntrack_lock); 1974 spin_unlock_bh(&nf_conntrack_lock);
1868 err = -ENOENT; 1975 err = -ENOENT;
1869 if (nlh->nlmsg_flags & NLM_F_CREATE) { 1976 if (nlh->nlmsg_flags & NLM_F_CREATE) {
1870 err = ctnetlink_create_expect(cda, 1977 err = ctnetlink_create_expect(net, zone, cda,
1871 u3, 1978 u3,
1872 NETLINK_CB(skb).pid, 1979 NETLINK_CB(skb).pid,
1873 nlmsg_report(nlh)); 1980 nlmsg_report(nlh));
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 3807ac7faf4c..088944824e13 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -28,6 +28,7 @@
28#include <net/netfilter/nf_conntrack.h> 28#include <net/netfilter/nf_conntrack.h>
29#include <net/netfilter/nf_conntrack_core.h> 29#include <net/netfilter/nf_conntrack_core.h>
30#include <net/netfilter/nf_conntrack_helper.h> 30#include <net/netfilter/nf_conntrack_helper.h>
31#include <net/netfilter/nf_conntrack_zones.h>
31#include <linux/netfilter/nf_conntrack_proto_gre.h> 32#include <linux/netfilter/nf_conntrack_proto_gre.h>
32#include <linux/netfilter/nf_conntrack_pptp.h> 33#include <linux/netfilter/nf_conntrack_pptp.h>
33 34
@@ -123,7 +124,7 @@ static void pptp_expectfn(struct nf_conn *ct,
123 pr_debug("trying to unexpect other dir: "); 124 pr_debug("trying to unexpect other dir: ");
124 nf_ct_dump_tuple(&inv_t); 125 nf_ct_dump_tuple(&inv_t);
125 126
126 exp_other = nf_ct_expect_find_get(net, &inv_t); 127 exp_other = nf_ct_expect_find_get(net, nf_ct_zone(ct), &inv_t);
127 if (exp_other) { 128 if (exp_other) {
128 /* delete other expectation. */ 129 /* delete other expectation. */
129 pr_debug("found\n"); 130 pr_debug("found\n");
@@ -136,17 +137,18 @@ static void pptp_expectfn(struct nf_conn *ct,
136 rcu_read_unlock(); 137 rcu_read_unlock();
137} 138}
138 139
139static int destroy_sibling_or_exp(struct net *net, 140static int destroy_sibling_or_exp(struct net *net, struct nf_conn *ct,
140 const struct nf_conntrack_tuple *t) 141 const struct nf_conntrack_tuple *t)
141{ 142{
142 const struct nf_conntrack_tuple_hash *h; 143 const struct nf_conntrack_tuple_hash *h;
143 struct nf_conntrack_expect *exp; 144 struct nf_conntrack_expect *exp;
144 struct nf_conn *sibling; 145 struct nf_conn *sibling;
146 u16 zone = nf_ct_zone(ct);
145 147
146 pr_debug("trying to timeout ct or exp for tuple "); 148 pr_debug("trying to timeout ct or exp for tuple ");
147 nf_ct_dump_tuple(t); 149 nf_ct_dump_tuple(t);
148 150
149 h = nf_conntrack_find_get(net, t); 151 h = nf_conntrack_find_get(net, zone, t);
150 if (h) { 152 if (h) {
151 sibling = nf_ct_tuplehash_to_ctrack(h); 153 sibling = nf_ct_tuplehash_to_ctrack(h);
152 pr_debug("setting timeout of conntrack %p to 0\n", sibling); 154 pr_debug("setting timeout of conntrack %p to 0\n", sibling);
@@ -157,7 +159,7 @@ static int destroy_sibling_or_exp(struct net *net,
157 nf_ct_put(sibling); 159 nf_ct_put(sibling);
158 return 1; 160 return 1;
159 } else { 161 } else {
160 exp = nf_ct_expect_find_get(net, t); 162 exp = nf_ct_expect_find_get(net, zone, t);
161 if (exp) { 163 if (exp) {
162 pr_debug("unexpect_related of expect %p\n", exp); 164 pr_debug("unexpect_related of expect %p\n", exp);
163 nf_ct_unexpect_related(exp); 165 nf_ct_unexpect_related(exp);
@@ -182,7 +184,7 @@ static void pptp_destroy_siblings(struct nf_conn *ct)
182 t.dst.protonum = IPPROTO_GRE; 184 t.dst.protonum = IPPROTO_GRE;
183 t.src.u.gre.key = help->help.ct_pptp_info.pns_call_id; 185 t.src.u.gre.key = help->help.ct_pptp_info.pns_call_id;
184 t.dst.u.gre.key = help->help.ct_pptp_info.pac_call_id; 186 t.dst.u.gre.key = help->help.ct_pptp_info.pac_call_id;
185 if (!destroy_sibling_or_exp(net, &t)) 187 if (!destroy_sibling_or_exp(net, ct, &t))
186 pr_debug("failed to timeout original pns->pac ct/exp\n"); 188 pr_debug("failed to timeout original pns->pac ct/exp\n");
187 189
188 /* try reply (pac->pns) tuple */ 190 /* try reply (pac->pns) tuple */
@@ -190,7 +192,7 @@ static void pptp_destroy_siblings(struct nf_conn *ct)
190 t.dst.protonum = IPPROTO_GRE; 192 t.dst.protonum = IPPROTO_GRE;
191 t.src.u.gre.key = help->help.ct_pptp_info.pac_call_id; 193 t.src.u.gre.key = help->help.ct_pptp_info.pac_call_id;
192 t.dst.u.gre.key = help->help.ct_pptp_info.pns_call_id; 194 t.dst.u.gre.key = help->help.ct_pptp_info.pns_call_id;
193 if (!destroy_sibling_or_exp(net, &t)) 195 if (!destroy_sibling_or_exp(net, ct, &t))
194 pr_debug("failed to timeout reply pac->pns ct/exp\n"); 196 pr_debug("failed to timeout reply pac->pns ct/exp\n");
195} 197}
196 198
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index dd375500dccc..9a2815549375 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -561,8 +561,9 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
561 return NF_ACCEPT; 561 return NF_ACCEPT;
562} 562}
563 563
564static int dccp_error(struct net *net, struct sk_buff *skb, 564static int dccp_error(struct net *net, struct nf_conn *tmpl,
565 unsigned int dataoff, enum ip_conntrack_info *ctinfo, 565 struct sk_buff *skb, unsigned int dataoff,
566 enum ip_conntrack_info *ctinfo,
566 u_int8_t pf, unsigned int hooknum) 567 u_int8_t pf, unsigned int hooknum)
567{ 568{
568 struct dccp_hdr _dh, *dh; 569 struct dccp_hdr _dh, *dh;
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index c99cfba64ddc..d899b1a69940 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -241,7 +241,7 @@ static int gre_packet(struct nf_conn *ct,
241 ct->proto.gre.stream_timeout); 241 ct->proto.gre.stream_timeout);
242 /* Also, more likely to be important, and not a probe. */ 242 /* Also, more likely to be important, and not a probe. */
243 set_bit(IPS_ASSURED_BIT, &ct->status); 243 set_bit(IPS_ASSURED_BIT, &ct->status);
244 nf_conntrack_event_cache(IPCT_STATUS, ct); 244 nf_conntrack_event_cache(IPCT_ASSURED, ct);
245 } else 245 } else
246 nf_ct_refresh_acct(ct, ctinfo, skb, 246 nf_ct_refresh_acct(ct, ctinfo, skb,
247 ct->proto.gre.timeout); 247 ct->proto.gre.timeout);
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index f9d930f80276..b68ff15ed979 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -377,7 +377,7 @@ static int sctp_packet(struct nf_conn *ct,
377 new_state == SCTP_CONNTRACK_ESTABLISHED) { 377 new_state == SCTP_CONNTRACK_ESTABLISHED) {
378 pr_debug("Setting assured bit\n"); 378 pr_debug("Setting assured bit\n");
379 set_bit(IPS_ASSURED_BIT, &ct->status); 379 set_bit(IPS_ASSURED_BIT, &ct->status);
380 nf_conntrack_event_cache(IPCT_STATUS, ct); 380 nf_conntrack_event_cache(IPCT_ASSURED, ct);
381 } 381 }
382 382
383 return NF_ACCEPT; 383 return NF_ACCEPT;
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 3c96437b45ad..9dd8cd4fb6e6 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -760,7 +760,7 @@ static const u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG) + 1] =
760}; 760};
761 761
762/* Protect conntrack agaist broken packets. Code taken from ipt_unclean.c. */ 762/* Protect conntrack agaist broken packets. Code taken from ipt_unclean.c. */
763static int tcp_error(struct net *net, 763static int tcp_error(struct net *net, struct nf_conn *tmpl,
764 struct sk_buff *skb, 764 struct sk_buff *skb,
765 unsigned int dataoff, 765 unsigned int dataoff,
766 enum ip_conntrack_info *ctinfo, 766 enum ip_conntrack_info *ctinfo,
@@ -1045,7 +1045,7 @@ static int tcp_packet(struct nf_conn *ct,
1045 after SYN_RECV or a valid answer for a picked up 1045 after SYN_RECV or a valid answer for a picked up
1046 connection. */ 1046 connection. */
1047 set_bit(IPS_ASSURED_BIT, &ct->status); 1047 set_bit(IPS_ASSURED_BIT, &ct->status);
1048 nf_conntrack_event_cache(IPCT_STATUS, ct); 1048 nf_conntrack_event_cache(IPCT_ASSURED, ct);
1049 } 1049 }
1050 nf_ct_refresh_acct(ct, ctinfo, skb, timeout); 1050 nf_ct_refresh_acct(ct, ctinfo, skb, timeout);
1051 1051
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 5c5518bedb4b..8289088b8218 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -77,7 +77,7 @@ static int udp_packet(struct nf_conn *ct,
77 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout_stream); 77 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout_stream);
78 /* Also, more likely to be important, and not a probe */ 78 /* Also, more likely to be important, and not a probe */
79 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) 79 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
80 nf_conntrack_event_cache(IPCT_STATUS, ct); 80 nf_conntrack_event_cache(IPCT_ASSURED, ct);
81 } else 81 } else
82 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout); 82 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout);
83 83
@@ -91,8 +91,8 @@ static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb,
91 return true; 91 return true;
92} 92}
93 93
94static int udp_error(struct net *net, struct sk_buff *skb, unsigned int dataoff, 94static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
95 enum ip_conntrack_info *ctinfo, 95 unsigned int dataoff, enum ip_conntrack_info *ctinfo,
96 u_int8_t pf, 96 u_int8_t pf,
97 unsigned int hooknum) 97 unsigned int hooknum)
98{ 98{
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 458655bb2106..263b5a72588d 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -75,7 +75,7 @@ static int udplite_packet(struct nf_conn *ct,
75 nf_ct_udplite_timeout_stream); 75 nf_ct_udplite_timeout_stream);
76 /* Also, more likely to be important, and not a probe */ 76 /* Also, more likely to be important, and not a probe */
77 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) 77 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
78 nf_conntrack_event_cache(IPCT_STATUS, ct); 78 nf_conntrack_event_cache(IPCT_ASSURED, ct);
79 } else 79 } else
80 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udplite_timeout); 80 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udplite_timeout);
81 81
@@ -89,7 +89,7 @@ static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb,
89 return true; 89 return true;
90} 90}
91 91
92static int udplite_error(struct net *net, 92static int udplite_error(struct net *net, struct nf_conn *tmpl,
93 struct sk_buff *skb, 93 struct sk_buff *skb,
94 unsigned int dataoff, 94 unsigned int dataoff,
95 enum ip_conntrack_info *ctinfo, 95 enum ip_conntrack_info *ctinfo,
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 4b572163784b..c6cd1b84eddd 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -16,12 +16,14 @@
16#include <linux/inet.h> 16#include <linux/inet.h>
17#include <linux/in.h> 17#include <linux/in.h>
18#include <linux/udp.h> 18#include <linux/udp.h>
19#include <linux/tcp.h>
19#include <linux/netfilter.h> 20#include <linux/netfilter.h>
20 21
21#include <net/netfilter/nf_conntrack.h> 22#include <net/netfilter/nf_conntrack.h>
22#include <net/netfilter/nf_conntrack_core.h> 23#include <net/netfilter/nf_conntrack_core.h>
23#include <net/netfilter/nf_conntrack_expect.h> 24#include <net/netfilter/nf_conntrack_expect.h>
24#include <net/netfilter/nf_conntrack_helper.h> 25#include <net/netfilter/nf_conntrack_helper.h>
26#include <net/netfilter/nf_conntrack_zones.h>
25#include <linux/netfilter/nf_conntrack_sip.h> 27#include <linux/netfilter/nf_conntrack_sip.h>
26 28
27MODULE_LICENSE("GPL"); 29MODULE_LICENSE("GPL");
@@ -50,12 +52,16 @@ module_param(sip_direct_media, int, 0600);
50MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling " 52MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling "
51 "endpoints only (default 1)"); 53 "endpoints only (default 1)");
52 54
53unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, 55unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, unsigned int dataoff,
54 const char **dptr, 56 const char **dptr,
55 unsigned int *datalen) __read_mostly; 57 unsigned int *datalen) __read_mostly;
56EXPORT_SYMBOL_GPL(nf_nat_sip_hook); 58EXPORT_SYMBOL_GPL(nf_nat_sip_hook);
57 59
60void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, s16 off) __read_mostly;
61EXPORT_SYMBOL_GPL(nf_nat_sip_seq_adjust_hook);
62
58unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb, 63unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
64 unsigned int dataoff,
59 const char **dptr, 65 const char **dptr,
60 unsigned int *datalen, 66 unsigned int *datalen,
61 struct nf_conntrack_expect *exp, 67 struct nf_conntrack_expect *exp,
@@ -63,17 +69,17 @@ unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
63 unsigned int matchlen) __read_mostly; 69 unsigned int matchlen) __read_mostly;
64EXPORT_SYMBOL_GPL(nf_nat_sip_expect_hook); 70EXPORT_SYMBOL_GPL(nf_nat_sip_expect_hook);
65 71
66unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, 72unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int dataoff,
67 const char **dptr, 73 const char **dptr,
68 unsigned int dataoff,
69 unsigned int *datalen, 74 unsigned int *datalen,
75 unsigned int sdpoff,
70 enum sdp_header_types type, 76 enum sdp_header_types type,
71 enum sdp_header_types term, 77 enum sdp_header_types term,
72 const union nf_inet_addr *addr) 78 const union nf_inet_addr *addr)
73 __read_mostly; 79 __read_mostly;
74EXPORT_SYMBOL_GPL(nf_nat_sdp_addr_hook); 80EXPORT_SYMBOL_GPL(nf_nat_sdp_addr_hook);
75 81
76unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, 82unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int dataoff,
77 const char **dptr, 83 const char **dptr,
78 unsigned int *datalen, 84 unsigned int *datalen,
79 unsigned int matchoff, 85 unsigned int matchoff,
@@ -82,14 +88,15 @@ unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
82EXPORT_SYMBOL_GPL(nf_nat_sdp_port_hook); 88EXPORT_SYMBOL_GPL(nf_nat_sdp_port_hook);
83 89
84unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb, 90unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
85 const char **dptr,
86 unsigned int dataoff, 91 unsigned int dataoff,
92 const char **dptr,
87 unsigned int *datalen, 93 unsigned int *datalen,
94 unsigned int sdpoff,
88 const union nf_inet_addr *addr) 95 const union nf_inet_addr *addr)
89 __read_mostly; 96 __read_mostly;
90EXPORT_SYMBOL_GPL(nf_nat_sdp_session_hook); 97EXPORT_SYMBOL_GPL(nf_nat_sdp_session_hook);
91 98
92unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, 99unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, unsigned int dataoff,
93 const char **dptr, 100 const char **dptr,
94 unsigned int *datalen, 101 unsigned int *datalen,
95 struct nf_conntrack_expect *rtp_exp, 102 struct nf_conntrack_expect *rtp_exp,
@@ -236,12 +243,13 @@ int ct_sip_parse_request(const struct nf_conn *ct,
236 return 0; 243 return 0;
237 244
238 /* Find SIP URI */ 245 /* Find SIP URI */
239 limit -= strlen("sip:"); 246 for (; dptr < limit - strlen("sip:"); dptr++) {
240 for (; dptr < limit; dptr++) {
241 if (*dptr == '\r' || *dptr == '\n') 247 if (*dptr == '\r' || *dptr == '\n')
242 return -1; 248 return -1;
243 if (strnicmp(dptr, "sip:", strlen("sip:")) == 0) 249 if (strnicmp(dptr, "sip:", strlen("sip:")) == 0) {
250 dptr += strlen("sip:");
244 break; 251 break;
252 }
245 } 253 }
246 if (!skp_epaddr_len(ct, dptr, limit, &shift)) 254 if (!skp_epaddr_len(ct, dptr, limit, &shift))
247 return 0; 255 return 0;
@@ -276,7 +284,7 @@ EXPORT_SYMBOL_GPL(ct_sip_parse_request);
276 * tabs, spaces and continuation lines, which are treated as a single whitespace 284 * tabs, spaces and continuation lines, which are treated as a single whitespace
277 * character. 285 * character.
278 * 286 *
279 * Some headers may appear multiple times. A comma seperated list of values is 287 * Some headers may appear multiple times. A comma separated list of values is
280 * equivalent to multiple headers. 288 * equivalent to multiple headers.
281 */ 289 */
282static const struct sip_header ct_sip_hdrs[] = { 290static const struct sip_header ct_sip_hdrs[] = {
@@ -284,7 +292,8 @@ static const struct sip_header ct_sip_hdrs[] = {
284 [SIP_HDR_FROM] = SIP_HDR("From", "f", "sip:", skp_epaddr_len), 292 [SIP_HDR_FROM] = SIP_HDR("From", "f", "sip:", skp_epaddr_len),
285 [SIP_HDR_TO] = SIP_HDR("To", "t", "sip:", skp_epaddr_len), 293 [SIP_HDR_TO] = SIP_HDR("To", "t", "sip:", skp_epaddr_len),
286 [SIP_HDR_CONTACT] = SIP_HDR("Contact", "m", "sip:", skp_epaddr_len), 294 [SIP_HDR_CONTACT] = SIP_HDR("Contact", "m", "sip:", skp_epaddr_len),
287 [SIP_HDR_VIA] = SIP_HDR("Via", "v", "UDP ", epaddr_len), 295 [SIP_HDR_VIA_UDP] = SIP_HDR("Via", "v", "UDP ", epaddr_len),
296 [SIP_HDR_VIA_TCP] = SIP_HDR("Via", "v", "TCP ", epaddr_len),
288 [SIP_HDR_EXPIRES] = SIP_HDR("Expires", NULL, NULL, digits_len), 297 [SIP_HDR_EXPIRES] = SIP_HDR("Expires", NULL, NULL, digits_len),
289 [SIP_HDR_CONTENT_LENGTH] = SIP_HDR("Content-Length", "l", NULL, digits_len), 298 [SIP_HDR_CONTENT_LENGTH] = SIP_HDR("Content-Length", "l", NULL, digits_len),
290}; 299};
@@ -376,7 +385,7 @@ int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
376 dptr += hdr->len; 385 dptr += hdr->len;
377 else if (hdr->cname && limit - dptr >= hdr->clen + 1 && 386 else if (hdr->cname && limit - dptr >= hdr->clen + 1 &&
378 strnicmp(dptr, hdr->cname, hdr->clen) == 0 && 387 strnicmp(dptr, hdr->cname, hdr->clen) == 0 &&
379 !isalpha(*(dptr + hdr->clen + 1))) 388 !isalpha(*(dptr + hdr->clen)))
380 dptr += hdr->clen; 389 dptr += hdr->clen;
381 else 390 else
382 continue; 391 continue;
@@ -412,7 +421,7 @@ int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
412} 421}
413EXPORT_SYMBOL_GPL(ct_sip_get_header); 422EXPORT_SYMBOL_GPL(ct_sip_get_header);
414 423
415/* Get next header field in a list of comma seperated values */ 424/* Get next header field in a list of comma separated values */
416static int ct_sip_next_header(const struct nf_conn *ct, const char *dptr, 425static int ct_sip_next_header(const struct nf_conn *ct, const char *dptr,
417 unsigned int dataoff, unsigned int datalen, 426 unsigned int dataoff, unsigned int datalen,
418 enum sip_header_types type, 427 enum sip_header_types type,
@@ -516,6 +525,33 @@ int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
516} 525}
517EXPORT_SYMBOL_GPL(ct_sip_parse_header_uri); 526EXPORT_SYMBOL_GPL(ct_sip_parse_header_uri);
518 527
528static int ct_sip_parse_param(const struct nf_conn *ct, const char *dptr,
529 unsigned int dataoff, unsigned int datalen,
530 const char *name,
531 unsigned int *matchoff, unsigned int *matchlen)
532{
533 const char *limit = dptr + datalen;
534 const char *start;
535 const char *end;
536
537 limit = ct_sip_header_search(dptr + dataoff, limit, ",", strlen(","));
538 if (!limit)
539 limit = dptr + datalen;
540
541 start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name));
542 if (!start)
543 return 0;
544 start += strlen(name);
545
546 end = ct_sip_header_search(start, limit, ";", strlen(";"));
547 if (!end)
548 end = limit;
549
550 *matchoff = start - dptr;
551 *matchlen = end - start;
552 return 1;
553}
554
519/* Parse address from header parameter and return address, offset and length */ 555/* Parse address from header parameter and return address, offset and length */
520int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr, 556int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
521 unsigned int dataoff, unsigned int datalen, 557 unsigned int dataoff, unsigned int datalen,
@@ -574,6 +610,29 @@ int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
574} 610}
575EXPORT_SYMBOL_GPL(ct_sip_parse_numerical_param); 611EXPORT_SYMBOL_GPL(ct_sip_parse_numerical_param);
576 612
613static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr,
614 unsigned int dataoff, unsigned int datalen,
615 u8 *proto)
616{
617 unsigned int matchoff, matchlen;
618
619 if (ct_sip_parse_param(ct, dptr, dataoff, datalen, "transport=",
620 &matchoff, &matchlen)) {
621 if (!strnicmp(dptr + matchoff, "TCP", strlen("TCP")))
622 *proto = IPPROTO_TCP;
623 else if (!strnicmp(dptr + matchoff, "UDP", strlen("UDP")))
624 *proto = IPPROTO_UDP;
625 else
626 return 0;
627
628 if (*proto != nf_ct_protonum(ct))
629 return 0;
630 } else
631 *proto = nf_ct_protonum(ct);
632
633 return 1;
634}
635
577/* SDP header parsing: a SDP session description contains an ordered set of 636/* SDP header parsing: a SDP session description contains an ordered set of
578 * headers, starting with a section containing general session parameters, 637 * headers, starting with a section containing general session parameters,
579 * optionally followed by multiple media descriptions. 638 * optionally followed by multiple media descriptions.
@@ -682,7 +741,7 @@ static int ct_sip_parse_sdp_addr(const struct nf_conn *ct, const char *dptr,
682 741
683static int refresh_signalling_expectation(struct nf_conn *ct, 742static int refresh_signalling_expectation(struct nf_conn *ct,
684 union nf_inet_addr *addr, 743 union nf_inet_addr *addr,
685 __be16 port, 744 u8 proto, __be16 port,
686 unsigned int expires) 745 unsigned int expires)
687{ 746{
688 struct nf_conn_help *help = nfct_help(ct); 747 struct nf_conn_help *help = nfct_help(ct);
@@ -694,6 +753,7 @@ static int refresh_signalling_expectation(struct nf_conn *ct,
694 hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) { 753 hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
695 if (exp->class != SIP_EXPECT_SIGNALLING || 754 if (exp->class != SIP_EXPECT_SIGNALLING ||
696 !nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) || 755 !nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) ||
756 exp->tuple.dst.protonum != proto ||
697 exp->tuple.dst.u.udp.port != port) 757 exp->tuple.dst.u.udp.port != port)
698 continue; 758 continue;
699 if (!del_timer(&exp->timeout)) 759 if (!del_timer(&exp->timeout))
@@ -728,7 +788,7 @@ static void flush_expectations(struct nf_conn *ct, bool media)
728 spin_unlock_bh(&nf_conntrack_lock); 788 spin_unlock_bh(&nf_conntrack_lock);
729} 789}
730 790
731static int set_expected_rtp_rtcp(struct sk_buff *skb, 791static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff,
732 const char **dptr, unsigned int *datalen, 792 const char **dptr, unsigned int *datalen,
733 union nf_inet_addr *daddr, __be16 port, 793 union nf_inet_addr *daddr, __be16 port,
734 enum sip_expectation_classes class, 794 enum sip_expectation_classes class,
@@ -777,7 +837,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb,
777 837
778 rcu_read_lock(); 838 rcu_read_lock();
779 do { 839 do {
780 exp = __nf_ct_expect_find(net, &tuple); 840 exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple);
781 841
782 if (!exp || exp->master == ct || 842 if (!exp || exp->master == ct ||
783 nfct_help(exp->master)->helper != nfct_help(ct)->helper || 843 nfct_help(exp->master)->helper != nfct_help(ct)->helper ||
@@ -805,7 +865,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb,
805 if (direct_rtp) { 865 if (direct_rtp) {
806 nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook); 866 nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook);
807 if (nf_nat_sdp_port && 867 if (nf_nat_sdp_port &&
808 !nf_nat_sdp_port(skb, dptr, datalen, 868 !nf_nat_sdp_port(skb, dataoff, dptr, datalen,
809 mediaoff, medialen, ntohs(rtp_port))) 869 mediaoff, medialen, ntohs(rtp_port)))
810 goto err1; 870 goto err1;
811 } 871 }
@@ -827,7 +887,8 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb,
827 887
828 nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook); 888 nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook);
829 if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp) 889 if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp)
830 ret = nf_nat_sdp_media(skb, dptr, datalen, rtp_exp, rtcp_exp, 890 ret = nf_nat_sdp_media(skb, dataoff, dptr, datalen,
891 rtp_exp, rtcp_exp,
831 mediaoff, medialen, daddr); 892 mediaoff, medialen, daddr);
832 else { 893 else {
833 if (nf_ct_expect_related(rtp_exp) == 0) { 894 if (nf_ct_expect_related(rtp_exp) == 0) {
@@ -847,6 +908,7 @@ err1:
847static const struct sdp_media_type sdp_media_types[] = { 908static const struct sdp_media_type sdp_media_types[] = {
848 SDP_MEDIA_TYPE("audio ", SIP_EXPECT_AUDIO), 909 SDP_MEDIA_TYPE("audio ", SIP_EXPECT_AUDIO),
849 SDP_MEDIA_TYPE("video ", SIP_EXPECT_VIDEO), 910 SDP_MEDIA_TYPE("video ", SIP_EXPECT_VIDEO),
911 SDP_MEDIA_TYPE("image ", SIP_EXPECT_IMAGE),
850}; 912};
851 913
852static const struct sdp_media_type *sdp_media_type(const char *dptr, 914static const struct sdp_media_type *sdp_media_type(const char *dptr,
@@ -866,13 +928,12 @@ static const struct sdp_media_type *sdp_media_type(const char *dptr,
866 return NULL; 928 return NULL;
867} 929}
868 930
869static int process_sdp(struct sk_buff *skb, 931static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
870 const char **dptr, unsigned int *datalen, 932 const char **dptr, unsigned int *datalen,
871 unsigned int cseq) 933 unsigned int cseq)
872{ 934{
873 enum ip_conntrack_info ctinfo; 935 enum ip_conntrack_info ctinfo;
874 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 936 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
875 struct nf_conn_help *help = nfct_help(ct);
876 unsigned int matchoff, matchlen; 937 unsigned int matchoff, matchlen;
877 unsigned int mediaoff, medialen; 938 unsigned int mediaoff, medialen;
878 unsigned int sdpoff; 939 unsigned int sdpoff;
@@ -941,7 +1002,7 @@ static int process_sdp(struct sk_buff *skb,
941 else 1002 else
942 return NF_DROP; 1003 return NF_DROP;
943 1004
944 ret = set_expected_rtp_rtcp(skb, dptr, datalen, 1005 ret = set_expected_rtp_rtcp(skb, dataoff, dptr, datalen,
945 &rtp_addr, htons(port), t->class, 1006 &rtp_addr, htons(port), t->class,
946 mediaoff, medialen); 1007 mediaoff, medialen);
947 if (ret != NF_ACCEPT) 1008 if (ret != NF_ACCEPT)
@@ -949,8 +1010,9 @@ static int process_sdp(struct sk_buff *skb,
949 1010
950 /* Update media connection address if present */ 1011 /* Update media connection address if present */
951 if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) { 1012 if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) {
952 ret = nf_nat_sdp_addr(skb, dptr, mediaoff, datalen, 1013 ret = nf_nat_sdp_addr(skb, dataoff, dptr, datalen,
953 c_hdr, SDP_HDR_MEDIA, &rtp_addr); 1014 mediaoff, c_hdr, SDP_HDR_MEDIA,
1015 &rtp_addr);
954 if (ret != NF_ACCEPT) 1016 if (ret != NF_ACCEPT)
955 return ret; 1017 return ret;
956 } 1018 }
@@ -960,14 +1022,12 @@ static int process_sdp(struct sk_buff *skb,
960 /* Update session connection and owner addresses */ 1022 /* Update session connection and owner addresses */
961 nf_nat_sdp_session = rcu_dereference(nf_nat_sdp_session_hook); 1023 nf_nat_sdp_session = rcu_dereference(nf_nat_sdp_session_hook);
962 if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK) 1024 if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK)
963 ret = nf_nat_sdp_session(skb, dptr, sdpoff, datalen, &rtp_addr); 1025 ret = nf_nat_sdp_session(skb, dataoff, dptr, datalen, sdpoff,
964 1026 &rtp_addr);
965 if (ret == NF_ACCEPT && i > 0)
966 help->help.ct_sip_info.invite_cseq = cseq;
967 1027
968 return ret; 1028 return ret;
969} 1029}
970static int process_invite_response(struct sk_buff *skb, 1030static int process_invite_response(struct sk_buff *skb, unsigned int dataoff,
971 const char **dptr, unsigned int *datalen, 1031 const char **dptr, unsigned int *datalen,
972 unsigned int cseq, unsigned int code) 1032 unsigned int cseq, unsigned int code)
973{ 1033{
@@ -977,13 +1037,13 @@ static int process_invite_response(struct sk_buff *skb,
977 1037
978 if ((code >= 100 && code <= 199) || 1038 if ((code >= 100 && code <= 199) ||
979 (code >= 200 && code <= 299)) 1039 (code >= 200 && code <= 299))
980 return process_sdp(skb, dptr, datalen, cseq); 1040 return process_sdp(skb, dataoff, dptr, datalen, cseq);
981 else if (help->help.ct_sip_info.invite_cseq == cseq) 1041 else if (help->help.ct_sip_info.invite_cseq == cseq)
982 flush_expectations(ct, true); 1042 flush_expectations(ct, true);
983 return NF_ACCEPT; 1043 return NF_ACCEPT;
984} 1044}
985 1045
986static int process_update_response(struct sk_buff *skb, 1046static int process_update_response(struct sk_buff *skb, unsigned int dataoff,
987 const char **dptr, unsigned int *datalen, 1047 const char **dptr, unsigned int *datalen,
988 unsigned int cseq, unsigned int code) 1048 unsigned int cseq, unsigned int code)
989{ 1049{
@@ -993,13 +1053,13 @@ static int process_update_response(struct sk_buff *skb,
993 1053
994 if ((code >= 100 && code <= 199) || 1054 if ((code >= 100 && code <= 199) ||
995 (code >= 200 && code <= 299)) 1055 (code >= 200 && code <= 299))
996 return process_sdp(skb, dptr, datalen, cseq); 1056 return process_sdp(skb, dataoff, dptr, datalen, cseq);
997 else if (help->help.ct_sip_info.invite_cseq == cseq) 1057 else if (help->help.ct_sip_info.invite_cseq == cseq)
998 flush_expectations(ct, true); 1058 flush_expectations(ct, true);
999 return NF_ACCEPT; 1059 return NF_ACCEPT;
1000} 1060}
1001 1061
1002static int process_prack_response(struct sk_buff *skb, 1062static int process_prack_response(struct sk_buff *skb, unsigned int dataoff,
1003 const char **dptr, unsigned int *datalen, 1063 const char **dptr, unsigned int *datalen,
1004 unsigned int cseq, unsigned int code) 1064 unsigned int cseq, unsigned int code)
1005{ 1065{
@@ -1009,13 +1069,29 @@ static int process_prack_response(struct sk_buff *skb,
1009 1069
1010 if ((code >= 100 && code <= 199) || 1070 if ((code >= 100 && code <= 199) ||
1011 (code >= 200 && code <= 299)) 1071 (code >= 200 && code <= 299))
1012 return process_sdp(skb, dptr, datalen, cseq); 1072 return process_sdp(skb, dataoff, dptr, datalen, cseq);
1013 else if (help->help.ct_sip_info.invite_cseq == cseq) 1073 else if (help->help.ct_sip_info.invite_cseq == cseq)
1014 flush_expectations(ct, true); 1074 flush_expectations(ct, true);
1015 return NF_ACCEPT; 1075 return NF_ACCEPT;
1016} 1076}
1017 1077
1018static int process_bye_request(struct sk_buff *skb, 1078static int process_invite_request(struct sk_buff *skb, unsigned int dataoff,
1079 const char **dptr, unsigned int *datalen,
1080 unsigned int cseq)
1081{
1082 enum ip_conntrack_info ctinfo;
1083 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1084 struct nf_conn_help *help = nfct_help(ct);
1085 unsigned int ret;
1086
1087 flush_expectations(ct, true);
1088 ret = process_sdp(skb, dataoff, dptr, datalen, cseq);
1089 if (ret == NF_ACCEPT)
1090 help->help.ct_sip_info.invite_cseq = cseq;
1091 return ret;
1092}
1093
1094static int process_bye_request(struct sk_buff *skb, unsigned int dataoff,
1019 const char **dptr, unsigned int *datalen, 1095 const char **dptr, unsigned int *datalen,
1020 unsigned int cseq) 1096 unsigned int cseq)
1021{ 1097{
@@ -1030,7 +1106,7 @@ static int process_bye_request(struct sk_buff *skb,
1030 * signalling connections. The expectation is marked inactive and is activated 1106 * signalling connections. The expectation is marked inactive and is activated
1031 * when receiving a response indicating success from the registrar. 1107 * when receiving a response indicating success from the registrar.
1032 */ 1108 */
1033static int process_register_request(struct sk_buff *skb, 1109static int process_register_request(struct sk_buff *skb, unsigned int dataoff,
1034 const char **dptr, unsigned int *datalen, 1110 const char **dptr, unsigned int *datalen,
1035 unsigned int cseq) 1111 unsigned int cseq)
1036{ 1112{
@@ -1042,6 +1118,7 @@ static int process_register_request(struct sk_buff *skb,
1042 struct nf_conntrack_expect *exp; 1118 struct nf_conntrack_expect *exp;
1043 union nf_inet_addr *saddr, daddr; 1119 union nf_inet_addr *saddr, daddr;
1044 __be16 port; 1120 __be16 port;
1121 u8 proto;
1045 unsigned int expires = 0; 1122 unsigned int expires = 0;
1046 int ret; 1123 int ret;
1047 typeof(nf_nat_sip_expect_hook) nf_nat_sip_expect; 1124 typeof(nf_nat_sip_expect_hook) nf_nat_sip_expect;
@@ -1074,6 +1151,10 @@ static int process_register_request(struct sk_buff *skb,
1074 if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, &daddr)) 1151 if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, &daddr))
1075 return NF_ACCEPT; 1152 return NF_ACCEPT;
1076 1153
1154 if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen, *datalen,
1155 &proto) == 0)
1156 return NF_ACCEPT;
1157
1077 if (ct_sip_parse_numerical_param(ct, *dptr, 1158 if (ct_sip_parse_numerical_param(ct, *dptr,
1078 matchoff + matchlen, *datalen, 1159 matchoff + matchlen, *datalen,
1079 "expires=", NULL, NULL, &expires) < 0) 1160 "expires=", NULL, NULL, &expires) < 0)
@@ -1093,14 +1174,14 @@ static int process_register_request(struct sk_buff *skb,
1093 saddr = &ct->tuplehash[!dir].tuple.src.u3; 1174 saddr = &ct->tuplehash[!dir].tuple.src.u3;
1094 1175
1095 nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, nf_ct_l3num(ct), 1176 nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, nf_ct_l3num(ct),
1096 saddr, &daddr, IPPROTO_UDP, NULL, &port); 1177 saddr, &daddr, proto, NULL, &port);
1097 exp->timeout.expires = sip_timeout * HZ; 1178 exp->timeout.expires = sip_timeout * HZ;
1098 exp->helper = nfct_help(ct)->helper; 1179 exp->helper = nfct_help(ct)->helper;
1099 exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE; 1180 exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE;
1100 1181
1101 nf_nat_sip_expect = rcu_dereference(nf_nat_sip_expect_hook); 1182 nf_nat_sip_expect = rcu_dereference(nf_nat_sip_expect_hook);
1102 if (nf_nat_sip_expect && ct->status & IPS_NAT_MASK) 1183 if (nf_nat_sip_expect && ct->status & IPS_NAT_MASK)
1103 ret = nf_nat_sip_expect(skb, dptr, datalen, exp, 1184 ret = nf_nat_sip_expect(skb, dataoff, dptr, datalen, exp,
1104 matchoff, matchlen); 1185 matchoff, matchlen);
1105 else { 1186 else {
1106 if (nf_ct_expect_related(exp) != 0) 1187 if (nf_ct_expect_related(exp) != 0)
@@ -1116,7 +1197,7 @@ store_cseq:
1116 return ret; 1197 return ret;
1117} 1198}
1118 1199
1119static int process_register_response(struct sk_buff *skb, 1200static int process_register_response(struct sk_buff *skb, unsigned int dataoff,
1120 const char **dptr, unsigned int *datalen, 1201 const char **dptr, unsigned int *datalen,
1121 unsigned int cseq, unsigned int code) 1202 unsigned int cseq, unsigned int code)
1122{ 1203{
@@ -1126,7 +1207,8 @@ static int process_register_response(struct sk_buff *skb,
1126 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 1207 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
1127 union nf_inet_addr addr; 1208 union nf_inet_addr addr;
1128 __be16 port; 1209 __be16 port;
1129 unsigned int matchoff, matchlen, dataoff = 0; 1210 u8 proto;
1211 unsigned int matchoff, matchlen, coff = 0;
1130 unsigned int expires = 0; 1212 unsigned int expires = 0;
1131 int in_contact = 0, ret; 1213 int in_contact = 0, ret;
1132 1214
@@ -1153,7 +1235,7 @@ static int process_register_response(struct sk_buff *skb,
1153 while (1) { 1235 while (1) {
1154 unsigned int c_expires = expires; 1236 unsigned int c_expires = expires;
1155 1237
1156 ret = ct_sip_parse_header_uri(ct, *dptr, &dataoff, *datalen, 1238 ret = ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen,
1157 SIP_HDR_CONTACT, &in_contact, 1239 SIP_HDR_CONTACT, &in_contact,
1158 &matchoff, &matchlen, 1240 &matchoff, &matchlen,
1159 &addr, &port); 1241 &addr, &port);
@@ -1166,6 +1248,10 @@ static int process_register_response(struct sk_buff *skb,
1166 if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, &addr)) 1248 if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, &addr))
1167 continue; 1249 continue;
1168 1250
1251 if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen,
1252 *datalen, &proto) == 0)
1253 continue;
1254
1169 ret = ct_sip_parse_numerical_param(ct, *dptr, 1255 ret = ct_sip_parse_numerical_param(ct, *dptr,
1170 matchoff + matchlen, 1256 matchoff + matchlen,
1171 *datalen, "expires=", 1257 *datalen, "expires=",
@@ -1174,7 +1260,8 @@ static int process_register_response(struct sk_buff *skb,
1174 return NF_DROP; 1260 return NF_DROP;
1175 if (c_expires == 0) 1261 if (c_expires == 0)
1176 break; 1262 break;
1177 if (refresh_signalling_expectation(ct, &addr, port, c_expires)) 1263 if (refresh_signalling_expectation(ct, &addr, proto, port,
1264 c_expires))
1178 return NF_ACCEPT; 1265 return NF_ACCEPT;
1179 } 1266 }
1180 1267
@@ -1184,7 +1271,7 @@ flush:
1184} 1271}
1185 1272
1186static const struct sip_handler sip_handlers[] = { 1273static const struct sip_handler sip_handlers[] = {
1187 SIP_HANDLER("INVITE", process_sdp, process_invite_response), 1274 SIP_HANDLER("INVITE", process_invite_request, process_invite_response),
1188 SIP_HANDLER("UPDATE", process_sdp, process_update_response), 1275 SIP_HANDLER("UPDATE", process_sdp, process_update_response),
1189 SIP_HANDLER("ACK", process_sdp, NULL), 1276 SIP_HANDLER("ACK", process_sdp, NULL),
1190 SIP_HANDLER("PRACK", process_sdp, process_prack_response), 1277 SIP_HANDLER("PRACK", process_sdp, process_prack_response),
@@ -1192,13 +1279,13 @@ static const struct sip_handler sip_handlers[] = {
1192 SIP_HANDLER("REGISTER", process_register_request, process_register_response), 1279 SIP_HANDLER("REGISTER", process_register_request, process_register_response),
1193}; 1280};
1194 1281
1195static int process_sip_response(struct sk_buff *skb, 1282static int process_sip_response(struct sk_buff *skb, unsigned int dataoff,
1196 const char **dptr, unsigned int *datalen) 1283 const char **dptr, unsigned int *datalen)
1197{ 1284{
1198 enum ip_conntrack_info ctinfo; 1285 enum ip_conntrack_info ctinfo;
1199 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 1286 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1200 unsigned int matchoff, matchlen; 1287 unsigned int matchoff, matchlen, matchend;
1201 unsigned int code, cseq, dataoff, i; 1288 unsigned int code, cseq, i;
1202 1289
1203 if (*datalen < strlen("SIP/2.0 200")) 1290 if (*datalen < strlen("SIP/2.0 200"))
1204 return NF_ACCEPT; 1291 return NF_ACCEPT;
@@ -1212,7 +1299,7 @@ static int process_sip_response(struct sk_buff *skb,
1212 cseq = simple_strtoul(*dptr + matchoff, NULL, 10); 1299 cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
1213 if (!cseq) 1300 if (!cseq)
1214 return NF_DROP; 1301 return NF_DROP;
1215 dataoff = matchoff + matchlen + 1; 1302 matchend = matchoff + matchlen + 1;
1216 1303
1217 for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) { 1304 for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
1218 const struct sip_handler *handler; 1305 const struct sip_handler *handler;
@@ -1220,15 +1307,16 @@ static int process_sip_response(struct sk_buff *skb,
1220 handler = &sip_handlers[i]; 1307 handler = &sip_handlers[i];
1221 if (handler->response == NULL) 1308 if (handler->response == NULL)
1222 continue; 1309 continue;
1223 if (*datalen < dataoff + handler->len || 1310 if (*datalen < matchend + handler->len ||
1224 strnicmp(*dptr + dataoff, handler->method, handler->len)) 1311 strnicmp(*dptr + matchend, handler->method, handler->len))
1225 continue; 1312 continue;
1226 return handler->response(skb, dptr, datalen, cseq, code); 1313 return handler->response(skb, dataoff, dptr, datalen,
1314 cseq, code);
1227 } 1315 }
1228 return NF_ACCEPT; 1316 return NF_ACCEPT;
1229} 1317}
1230 1318
1231static int process_sip_request(struct sk_buff *skb, 1319static int process_sip_request(struct sk_buff *skb, unsigned int dataoff,
1232 const char **dptr, unsigned int *datalen) 1320 const char **dptr, unsigned int *datalen)
1233{ 1321{
1234 enum ip_conntrack_info ctinfo; 1322 enum ip_conntrack_info ctinfo;
@@ -1253,69 +1341,157 @@ static int process_sip_request(struct sk_buff *skb,
1253 if (!cseq) 1341 if (!cseq)
1254 return NF_DROP; 1342 return NF_DROP;
1255 1343
1256 return handler->request(skb, dptr, datalen, cseq); 1344 return handler->request(skb, dataoff, dptr, datalen, cseq);
1257 } 1345 }
1258 return NF_ACCEPT; 1346 return NF_ACCEPT;
1259} 1347}
1260 1348
1261static int sip_help(struct sk_buff *skb, 1349static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct,
1262 unsigned int protoff, 1350 unsigned int dataoff, const char **dptr,
1263 struct nf_conn *ct, 1351 unsigned int *datalen)
1264 enum ip_conntrack_info ctinfo) 1352{
1353 typeof(nf_nat_sip_hook) nf_nat_sip;
1354 int ret;
1355
1356 if (strnicmp(*dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0)
1357 ret = process_sip_request(skb, dataoff, dptr, datalen);
1358 else
1359 ret = process_sip_response(skb, dataoff, dptr, datalen);
1360
1361 if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
1362 nf_nat_sip = rcu_dereference(nf_nat_sip_hook);
1363 if (nf_nat_sip && !nf_nat_sip(skb, dataoff, dptr, datalen))
1364 ret = NF_DROP;
1365 }
1366
1367 return ret;
1368}
1369
1370static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
1371 struct nf_conn *ct, enum ip_conntrack_info ctinfo)
1265{ 1372{
1373 struct tcphdr *th, _tcph;
1266 unsigned int dataoff, datalen; 1374 unsigned int dataoff, datalen;
1267 const char *dptr; 1375 unsigned int matchoff, matchlen, clen;
1376 unsigned int msglen, origlen;
1377 const char *dptr, *end;
1378 s16 diff, tdiff = 0;
1268 int ret; 1379 int ret;
1269 typeof(nf_nat_sip_hook) nf_nat_sip; 1380 typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
1381
1382 if (ctinfo != IP_CT_ESTABLISHED &&
1383 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
1384 return NF_ACCEPT;
1270 1385
1271 /* No Data ? */ 1386 /* No Data ? */
1272 dataoff = protoff + sizeof(struct udphdr); 1387 th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
1388 if (th == NULL)
1389 return NF_ACCEPT;
1390 dataoff = protoff + th->doff * 4;
1273 if (dataoff >= skb->len) 1391 if (dataoff >= skb->len)
1274 return NF_ACCEPT; 1392 return NF_ACCEPT;
1275 1393
1276 nf_ct_refresh(ct, skb, sip_timeout * HZ); 1394 nf_ct_refresh(ct, skb, sip_timeout * HZ);
1277 1395
1278 if (!skb_is_nonlinear(skb)) 1396 if (skb_is_nonlinear(skb)) {
1279 dptr = skb->data + dataoff;
1280 else {
1281 pr_debug("Copy of skbuff not supported yet.\n"); 1397 pr_debug("Copy of skbuff not supported yet.\n");
1282 return NF_ACCEPT; 1398 return NF_ACCEPT;
1283 } 1399 }
1284 1400
1401 dptr = skb->data + dataoff;
1285 datalen = skb->len - dataoff; 1402 datalen = skb->len - dataoff;
1286 if (datalen < strlen("SIP/2.0 200")) 1403 if (datalen < strlen("SIP/2.0 200"))
1287 return NF_ACCEPT; 1404 return NF_ACCEPT;
1288 1405
1289 if (strnicmp(dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0) 1406 while (1) {
1290 ret = process_sip_request(skb, &dptr, &datalen); 1407 if (ct_sip_get_header(ct, dptr, 0, datalen,
1291 else 1408 SIP_HDR_CONTENT_LENGTH,
1292 ret = process_sip_response(skb, &dptr, &datalen); 1409 &matchoff, &matchlen) <= 0)
1410 break;
1411
1412 clen = simple_strtoul(dptr + matchoff, (char **)&end, 10);
1413 if (dptr + matchoff == end)
1414 break;
1415
1416 if (end + strlen("\r\n\r\n") > dptr + datalen)
1417 break;
1418 if (end[0] != '\r' || end[1] != '\n' ||
1419 end[2] != '\r' || end[3] != '\n')
1420 break;
1421 end += strlen("\r\n\r\n") + clen;
1422
1423 msglen = origlen = end - dptr;
1424
1425 ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen);
1426 if (ret != NF_ACCEPT)
1427 break;
1428 diff = msglen - origlen;
1429 tdiff += diff;
1430
1431 dataoff += msglen;
1432 dptr += msglen;
1433 datalen = datalen + diff - msglen;
1434 }
1293 1435
1294 if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) { 1436 if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
1295 nf_nat_sip = rcu_dereference(nf_nat_sip_hook); 1437 nf_nat_sip_seq_adjust = rcu_dereference(nf_nat_sip_seq_adjust_hook);
1296 if (nf_nat_sip && !nf_nat_sip(skb, &dptr, &datalen)) 1438 if (nf_nat_sip_seq_adjust)
1297 ret = NF_DROP; 1439 nf_nat_sip_seq_adjust(skb, tdiff);
1298 } 1440 }
1299 1441
1300 return ret; 1442 return ret;
1301} 1443}
1302 1444
1303static struct nf_conntrack_helper sip[MAX_PORTS][2] __read_mostly; 1445static int sip_help_udp(struct sk_buff *skb, unsigned int protoff,
1304static char sip_names[MAX_PORTS][2][sizeof("sip-65535")] __read_mostly; 1446 struct nf_conn *ct, enum ip_conntrack_info ctinfo)
1447{
1448 unsigned int dataoff, datalen;
1449 const char *dptr;
1450
1451 /* No Data ? */
1452 dataoff = protoff + sizeof(struct udphdr);
1453 if (dataoff >= skb->len)
1454 return NF_ACCEPT;
1455
1456 nf_ct_refresh(ct, skb, sip_timeout * HZ);
1457
1458 if (skb_is_nonlinear(skb)) {
1459 pr_debug("Copy of skbuff not supported yet.\n");
1460 return NF_ACCEPT;
1461 }
1462
1463 dptr = skb->data + dataoff;
1464 datalen = skb->len - dataoff;
1465 if (datalen < strlen("SIP/2.0 200"))
1466 return NF_ACCEPT;
1467
1468 return process_sip_msg(skb, ct, dataoff, &dptr, &datalen);
1469}
1470
1471static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly;
1472static char sip_names[MAX_PORTS][4][sizeof("sip-65535")] __read_mostly;
1305 1473
1306static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = { 1474static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = {
1307 [SIP_EXPECT_SIGNALLING] = { 1475 [SIP_EXPECT_SIGNALLING] = {
1476 .name = "signalling",
1308 .max_expected = 1, 1477 .max_expected = 1,
1309 .timeout = 3 * 60, 1478 .timeout = 3 * 60,
1310 }, 1479 },
1311 [SIP_EXPECT_AUDIO] = { 1480 [SIP_EXPECT_AUDIO] = {
1481 .name = "audio",
1312 .max_expected = 2 * IP_CT_DIR_MAX, 1482 .max_expected = 2 * IP_CT_DIR_MAX,
1313 .timeout = 3 * 60, 1483 .timeout = 3 * 60,
1314 }, 1484 },
1315 [SIP_EXPECT_VIDEO] = { 1485 [SIP_EXPECT_VIDEO] = {
1486 .name = "video",
1316 .max_expected = 2 * IP_CT_DIR_MAX, 1487 .max_expected = 2 * IP_CT_DIR_MAX,
1317 .timeout = 3 * 60, 1488 .timeout = 3 * 60,
1318 }, 1489 },
1490 [SIP_EXPECT_IMAGE] = {
1491 .name = "image",
1492 .max_expected = IP_CT_DIR_MAX,
1493 .timeout = 3 * 60,
1494 },
1319}; 1495};
1320 1496
1321static void nf_conntrack_sip_fini(void) 1497static void nf_conntrack_sip_fini(void)
@@ -1323,7 +1499,7 @@ static void nf_conntrack_sip_fini(void)
1323 int i, j; 1499 int i, j;
1324 1500
1325 for (i = 0; i < ports_c; i++) { 1501 for (i = 0; i < ports_c; i++) {
1326 for (j = 0; j < 2; j++) { 1502 for (j = 0; j < ARRAY_SIZE(sip[i]); j++) {
1327 if (sip[i][j].me == NULL) 1503 if (sip[i][j].me == NULL)
1328 continue; 1504 continue;
1329 nf_conntrack_helper_unregister(&sip[i][j]); 1505 nf_conntrack_helper_unregister(&sip[i][j]);
@@ -1343,14 +1519,24 @@ static int __init nf_conntrack_sip_init(void)
1343 memset(&sip[i], 0, sizeof(sip[i])); 1519 memset(&sip[i], 0, sizeof(sip[i]));
1344 1520
1345 sip[i][0].tuple.src.l3num = AF_INET; 1521 sip[i][0].tuple.src.l3num = AF_INET;
1346 sip[i][1].tuple.src.l3num = AF_INET6; 1522 sip[i][0].tuple.dst.protonum = IPPROTO_UDP;
1347 for (j = 0; j < 2; j++) { 1523 sip[i][0].help = sip_help_udp;
1348 sip[i][j].tuple.dst.protonum = IPPROTO_UDP; 1524 sip[i][1].tuple.src.l3num = AF_INET;
1525 sip[i][1].tuple.dst.protonum = IPPROTO_TCP;
1526 sip[i][1].help = sip_help_tcp;
1527
1528 sip[i][2].tuple.src.l3num = AF_INET6;
1529 sip[i][2].tuple.dst.protonum = IPPROTO_UDP;
1530 sip[i][2].help = sip_help_udp;
1531 sip[i][3].tuple.src.l3num = AF_INET6;
1532 sip[i][3].tuple.dst.protonum = IPPROTO_TCP;
1533 sip[i][3].help = sip_help_tcp;
1534
1535 for (j = 0; j < ARRAY_SIZE(sip[i]); j++) {
1349 sip[i][j].tuple.src.u.udp.port = htons(ports[i]); 1536 sip[i][j].tuple.src.u.udp.port = htons(ports[i]);
1350 sip[i][j].expect_policy = sip_exp_policy; 1537 sip[i][j].expect_policy = sip_exp_policy;
1351 sip[i][j].expect_class_max = SIP_EXPECT_MAX; 1538 sip[i][j].expect_class_max = SIP_EXPECT_MAX;
1352 sip[i][j].me = THIS_MODULE; 1539 sip[i][j].me = THIS_MODULE;
1353 sip[i][j].help = sip_help;
1354 1540
1355 tmpname = &sip_names[i][j][0]; 1541 tmpname = &sip_names[i][j][0];
1356 if (ports[i] == SIP_PORT) 1542 if (ports[i] == SIP_PORT)
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 028aba667ef7..24a42efe62ef 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -26,6 +26,7 @@
26#include <net/netfilter/nf_conntrack_expect.h> 26#include <net/netfilter/nf_conntrack_expect.h>
27#include <net/netfilter/nf_conntrack_helper.h> 27#include <net/netfilter/nf_conntrack_helper.h>
28#include <net/netfilter/nf_conntrack_acct.h> 28#include <net/netfilter/nf_conntrack_acct.h>
29#include <net/netfilter/nf_conntrack_zones.h>
29 30
30MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
31 32
@@ -51,7 +52,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
51 struct hlist_nulls_node *n; 52 struct hlist_nulls_node *n;
52 53
53 for (st->bucket = 0; 54 for (st->bucket = 0;
54 st->bucket < nf_conntrack_htable_size; 55 st->bucket < net->ct.htable_size;
55 st->bucket++) { 56 st->bucket++) {
56 n = rcu_dereference(net->ct.hash[st->bucket].first); 57 n = rcu_dereference(net->ct.hash[st->bucket].first);
57 if (!is_a_nulls(n)) 58 if (!is_a_nulls(n))
@@ -69,7 +70,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
69 head = rcu_dereference(head->next); 70 head = rcu_dereference(head->next);
70 while (is_a_nulls(head)) { 71 while (is_a_nulls(head)) {
71 if (likely(get_nulls_value(head) == st->bucket)) { 72 if (likely(get_nulls_value(head) == st->bucket)) {
72 if (++st->bucket >= nf_conntrack_htable_size) 73 if (++st->bucket >= net->ct.htable_size)
73 return NULL; 74 return NULL;
74 } 75 }
75 head = rcu_dereference(net->ct.hash[st->bucket].first); 76 head = rcu_dereference(net->ct.hash[st->bucket].first);
@@ -171,6 +172,11 @@ static int ct_seq_show(struct seq_file *s, void *v)
171 goto release; 172 goto release;
172#endif 173#endif
173 174
175#ifdef CONFIG_NF_CONNTRACK_ZONES
176 if (seq_printf(s, "zone=%u ", nf_ct_zone(ct)))
177 goto release;
178#endif
179
174 if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use))) 180 if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
175 goto release; 181 goto release;
176 182
@@ -355,7 +361,7 @@ static ctl_table nf_ct_sysctl_table[] = {
355 }, 361 },
356 { 362 {
357 .procname = "nf_conntrack_buckets", 363 .procname = "nf_conntrack_buckets",
358 .data = &nf_conntrack_htable_size, 364 .data = &init_net.ct.htable_size,
359 .maxlen = sizeof(unsigned int), 365 .maxlen = sizeof(unsigned int),
360 .mode = 0444, 366 .mode = 0444,
361 .proc_handler = proc_dointvec, 367 .proc_handler = proc_dointvec,
@@ -421,6 +427,7 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
421 goto out_kmemdup; 427 goto out_kmemdup;
422 428
423 table[1].data = &net->ct.count; 429 table[1].data = &net->ct.count;
430 table[2].data = &net->ct.htable_size;
424 table[3].data = &net->ct.sysctl_checksum; 431 table[3].data = &net->ct.sysctl_checksum;
425 table[4].data = &net->ct.sysctl_log_invalid; 432 table[4].data = &net->ct.sysctl_log_invalid;
426 433
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 3a6fd77f7761..ba095fd014e5 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -265,7 +265,6 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
265 local_bh_disable(); 265 local_bh_disable();
266 entry->okfn(skb); 266 entry->okfn(skb);
267 local_bh_enable(); 267 local_bh_enable();
268 case NF_STOLEN:
269 break; 268 break;
270 case NF_QUEUE: 269 case NF_QUEUE:
271 if (!__nf_queue(skb, elem, entry->pf, entry->hook, 270 if (!__nf_queue(skb, elem, entry->pf, entry->hook,
@@ -273,6 +272,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
273 verdict >> NF_VERDICT_BITS)) 272 verdict >> NF_VERDICT_BITS))
274 goto next_hook; 273 goto next_hook;
275 break; 274 break;
275 case NF_STOLEN:
276 default: 276 default:
277 kfree_skb(skb); 277 kfree_skb(skb);
278 } 278 }
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index eedc0c1ac7a4..6afa3d52ea5f 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -40,7 +40,6 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
40 40
41static char __initdata nfversion[] = "0.30"; 41static char __initdata nfversion[] = "0.30";
42 42
43static struct sock *nfnl = NULL;
44static const struct nfnetlink_subsystem *subsys_table[NFNL_SUBSYS_COUNT]; 43static const struct nfnetlink_subsystem *subsys_table[NFNL_SUBSYS_COUNT];
45static DEFINE_MUTEX(nfnl_mutex); 44static DEFINE_MUTEX(nfnl_mutex);
46 45
@@ -101,34 +100,35 @@ nfnetlink_find_client(u_int16_t type, const struct nfnetlink_subsystem *ss)
101 return &ss->cb[cb_id]; 100 return &ss->cb[cb_id];
102} 101}
103 102
104int nfnetlink_has_listeners(unsigned int group) 103int nfnetlink_has_listeners(struct net *net, unsigned int group)
105{ 104{
106 return netlink_has_listeners(nfnl, group); 105 return netlink_has_listeners(net->nfnl, group);
107} 106}
108EXPORT_SYMBOL_GPL(nfnetlink_has_listeners); 107EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);
109 108
110int nfnetlink_send(struct sk_buff *skb, u32 pid, 109int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid,
111 unsigned group, int echo, gfp_t flags) 110 unsigned group, int echo, gfp_t flags)
112{ 111{
113 return nlmsg_notify(nfnl, skb, pid, group, echo, flags); 112 return nlmsg_notify(net->nfnl, skb, pid, group, echo, flags);
114} 113}
115EXPORT_SYMBOL_GPL(nfnetlink_send); 114EXPORT_SYMBOL_GPL(nfnetlink_send);
116 115
117void nfnetlink_set_err(u32 pid, u32 group, int error) 116int nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error)
118{ 117{
119 netlink_set_err(nfnl, pid, group, error); 118 return netlink_set_err(net->nfnl, pid, group, error);
120} 119}
121EXPORT_SYMBOL_GPL(nfnetlink_set_err); 120EXPORT_SYMBOL_GPL(nfnetlink_set_err);
122 121
123int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags) 122int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags)
124{ 123{
125 return netlink_unicast(nfnl, skb, pid, flags); 124 return netlink_unicast(net->nfnl, skb, pid, flags);
126} 125}
127EXPORT_SYMBOL_GPL(nfnetlink_unicast); 126EXPORT_SYMBOL_GPL(nfnetlink_unicast);
128 127
129/* Process one complete nfnetlink message. */ 128/* Process one complete nfnetlink message. */
130static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 129static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
131{ 130{
131 struct net *net = sock_net(skb->sk);
132 const struct nfnl_callback *nc; 132 const struct nfnl_callback *nc;
133 const struct nfnetlink_subsystem *ss; 133 const struct nfnetlink_subsystem *ss;
134 int type, err; 134 int type, err;
@@ -170,7 +170,7 @@ replay:
170 if (err < 0) 170 if (err < 0)
171 return err; 171 return err;
172 172
173 err = nc->call(nfnl, skb, nlh, (const struct nlattr **)cda); 173 err = nc->call(net->nfnl, skb, nlh, (const struct nlattr **)cda);
174 if (err == -EAGAIN) 174 if (err == -EAGAIN)
175 goto replay; 175 goto replay;
176 return err; 176 return err;
@@ -184,26 +184,45 @@ static void nfnetlink_rcv(struct sk_buff *skb)
184 nfnl_unlock(); 184 nfnl_unlock();
185} 185}
186 186
187static void __exit nfnetlink_exit(void) 187static int __net_init nfnetlink_net_init(struct net *net)
188{ 188{
189 printk("Removing netfilter NETLINK layer.\n"); 189 struct sock *nfnl;
190 netlink_kernel_release(nfnl); 190
191 return; 191 nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, NFNLGRP_MAX,
192 nfnetlink_rcv, NULL, THIS_MODULE);
193 if (!nfnl)
194 return -ENOMEM;
195 net->nfnl_stash = nfnl;
196 rcu_assign_pointer(net->nfnl, nfnl);
197 return 0;
192} 198}
193 199
194static int __init nfnetlink_init(void) 200static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
195{ 201{
196 printk("Netfilter messages via NETLINK v%s.\n", nfversion); 202 struct net *net;
197 203
198 nfnl = netlink_kernel_create(&init_net, NETLINK_NETFILTER, NFNLGRP_MAX, 204 list_for_each_entry(net, net_exit_list, exit_list)
199 nfnetlink_rcv, NULL, THIS_MODULE); 205 rcu_assign_pointer(net->nfnl, NULL);
200 if (!nfnl) { 206 synchronize_net();
201 printk(KERN_ERR "cannot initialize nfnetlink!\n"); 207 list_for_each_entry(net, net_exit_list, exit_list)
202 return -ENOMEM; 208 netlink_kernel_release(net->nfnl_stash);
203 } 209}
204 210
205 return 0; 211static struct pernet_operations nfnetlink_net_ops = {
212 .init = nfnetlink_net_init,
213 .exit_batch = nfnetlink_net_exit_batch,
214};
215
216static int __init nfnetlink_init(void)
217{
218 printk("Netfilter messages via NETLINK v%s.\n", nfversion);
219 return register_pernet_subsys(&nfnetlink_net_ops);
206} 220}
207 221
222static void __exit nfnetlink_exit(void)
223{
224 printk("Removing netfilter NETLINK layer.\n");
225 unregister_pernet_subsys(&nfnetlink_net_ops);
226}
208module_init(nfnetlink_init); 227module_init(nfnetlink_init);
209module_exit(nfnetlink_exit); 228module_exit(nfnetlink_exit);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 9de0470d557e..d9b8fb8ab340 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -323,7 +323,8 @@ __nfulnl_send(struct nfulnl_instance *inst)
323 NLMSG_DONE, 323 NLMSG_DONE,
324 sizeof(struct nfgenmsg)); 324 sizeof(struct nfgenmsg));
325 325
326 status = nfnetlink_unicast(inst->skb, inst->peer_pid, MSG_DONTWAIT); 326 status = nfnetlink_unicast(inst->skb, &init_net, inst->peer_pid,
327 MSG_DONTWAIT);
327 328
328 inst->qlen = 0; 329 inst->qlen = 0;
329 inst->skb = NULL; 330 inst->skb = NULL;
@@ -767,7 +768,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
767 } 768 }
768 769
769 instance_destroy(inst); 770 instance_destroy(inst);
770 goto out; 771 goto out_put;
771 default: 772 default:
772 ret = -ENOTSUPP; 773 ret = -ENOTSUPP;
773 break; 774 break;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 7e3fa410641e..7ba4abc405c9 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -112,7 +112,6 @@ instance_create(u_int16_t queue_num, int pid)
112 inst->copy_mode = NFQNL_COPY_NONE; 112 inst->copy_mode = NFQNL_COPY_NONE;
113 spin_lock_init(&inst->lock); 113 spin_lock_init(&inst->lock);
114 INIT_LIST_HEAD(&inst->queue_list); 114 INIT_LIST_HEAD(&inst->queue_list);
115 INIT_RCU_HEAD(&inst->rcu);
116 115
117 if (!try_module_get(THIS_MODULE)) { 116 if (!try_module_get(THIS_MODULE)) {
118 err = -EAGAIN; 117 err = -EAGAIN;
@@ -414,13 +413,13 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
414 queue->queue_dropped++; 413 queue->queue_dropped++;
415 if (net_ratelimit()) 414 if (net_ratelimit())
416 printk(KERN_WARNING "nf_queue: full at %d entries, " 415 printk(KERN_WARNING "nf_queue: full at %d entries, "
417 "dropping packets(s). Dropped: %d\n", 416 "dropping packets(s).\n",
418 queue->queue_total, queue->queue_dropped); 417 queue->queue_total);
419 goto err_out_free_nskb; 418 goto err_out_free_nskb;
420 } 419 }
421 420
422 /* nfnetlink_unicast will either free the nskb or add it to a socket */ 421 /* nfnetlink_unicast will either free the nskb or add it to a socket */
423 err = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT); 422 err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT);
424 if (err < 0) { 423 if (err < 0) {
425 queue->queue_user_dropped++; 424 queue->queue_user_dropped++;
426 goto err_out_unlock; 425 goto err_out_unlock;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index f01955cce314..0a12cedfe9e3 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -26,7 +26,9 @@
26 26
27#include <linux/netfilter/x_tables.h> 27#include <linux/netfilter/x_tables.h>
28#include <linux/netfilter_arp.h> 28#include <linux/netfilter_arp.h>
29 29#include <linux/netfilter_ipv4/ip_tables.h>
30#include <linux/netfilter_ipv6/ip6_tables.h>
31#include <linux/netfilter_arp/arp_tables.h>
30 32
31MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
32MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); 34MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
@@ -37,7 +39,7 @@ MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
37struct compat_delta { 39struct compat_delta {
38 struct compat_delta *next; 40 struct compat_delta *next;
39 unsigned int offset; 41 unsigned int offset;
40 short delta; 42 int delta;
41}; 43};
42 44
43struct xt_af { 45struct xt_af {
@@ -364,8 +366,10 @@ int xt_check_match(struct xt_mtchk_param *par,
364 * ebt_among is exempt from centralized matchsize checking 366 * ebt_among is exempt from centralized matchsize checking
365 * because it uses a dynamic-size data set. 367 * because it uses a dynamic-size data set.
366 */ 368 */
367 pr_err("%s_tables: %s match: invalid size %Zu != %u\n", 369 pr_err("%s_tables: %s.%u match: invalid size "
370 "%u (kernel) != (user) %u\n",
368 xt_prefix[par->family], par->match->name, 371 xt_prefix[par->family], par->match->name,
372 par->match->revision,
369 XT_ALIGN(par->match->matchsize), size); 373 XT_ALIGN(par->match->matchsize), size);
370 return -EINVAL; 374 return -EINVAL;
371 } 375 }
@@ -435,10 +439,10 @@ void xt_compat_flush_offsets(u_int8_t af)
435} 439}
436EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); 440EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
437 441
438short xt_compat_calc_jump(u_int8_t af, unsigned int offset) 442int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
439{ 443{
440 struct compat_delta *tmp; 444 struct compat_delta *tmp;
441 short delta; 445 int delta;
442 446
443 for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next) 447 for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next)
444 if (tmp->offset < offset) 448 if (tmp->offset < offset)
@@ -481,8 +485,8 @@ int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
481} 485}
482EXPORT_SYMBOL_GPL(xt_compat_match_from_user); 486EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
483 487
484int xt_compat_match_to_user(struct xt_entry_match *m, void __user **dstptr, 488int xt_compat_match_to_user(const struct xt_entry_match *m,
485 unsigned int *size) 489 void __user **dstptr, unsigned int *size)
486{ 490{
487 const struct xt_match *match = m->u.kernel.match; 491 const struct xt_match *match = m->u.kernel.match;
488 struct compat_xt_entry_match __user *cm = *dstptr; 492 struct compat_xt_entry_match __user *cm = *dstptr;
@@ -514,8 +518,10 @@ int xt_check_target(struct xt_tgchk_param *par,
514 unsigned int size, u_int8_t proto, bool inv_proto) 518 unsigned int size, u_int8_t proto, bool inv_proto)
515{ 519{
516 if (XT_ALIGN(par->target->targetsize) != size) { 520 if (XT_ALIGN(par->target->targetsize) != size) {
517 pr_err("%s_tables: %s target: invalid size %Zu != %u\n", 521 pr_err("%s_tables: %s.%u target: invalid size "
522 "%u (kernel) != (user) %u\n",
518 xt_prefix[par->family], par->target->name, 523 xt_prefix[par->family], par->target->name,
524 par->target->revision,
519 XT_ALIGN(par->target->targetsize), size); 525 XT_ALIGN(par->target->targetsize), size);
520 return -EINVAL; 526 return -EINVAL;
521 } 527 }
@@ -582,8 +588,8 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
582} 588}
583EXPORT_SYMBOL_GPL(xt_compat_target_from_user); 589EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
584 590
585int xt_compat_target_to_user(struct xt_entry_target *t, void __user **dstptr, 591int xt_compat_target_to_user(const struct xt_entry_target *t,
586 unsigned int *size) 592 void __user **dstptr, unsigned int *size)
587{ 593{
588 const struct xt_target *target = t->u.kernel.target; 594 const struct xt_target *target = t->u.kernel.target;
589 struct compat_xt_entry_target __user *ct = *dstptr; 595 struct compat_xt_entry_target __user *ct = *dstptr;
@@ -1091,6 +1097,60 @@ static const struct file_operations xt_target_ops = {
1091 1097
1092#endif /* CONFIG_PROC_FS */ 1098#endif /* CONFIG_PROC_FS */
1093 1099
1100/**
1101 * xt_hook_link - set up hooks for a new table
1102 * @table: table with metadata needed to set up hooks
1103 * @fn: Hook function
1104 *
1105 * This function will take care of creating and registering the necessary
1106 * Netfilter hooks for XT tables.
1107 */
1108struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn)
1109{
1110 unsigned int hook_mask = table->valid_hooks;
1111 uint8_t i, num_hooks = hweight32(hook_mask);
1112 uint8_t hooknum;
1113 struct nf_hook_ops *ops;
1114 int ret;
1115
1116 ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
1117 if (ops == NULL)
1118 return ERR_PTR(-ENOMEM);
1119
1120 for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
1121 hook_mask >>= 1, ++hooknum) {
1122 if (!(hook_mask & 1))
1123 continue;
1124 ops[i].hook = fn;
1125 ops[i].owner = table->me;
1126 ops[i].pf = table->af;
1127 ops[i].hooknum = hooknum;
1128 ops[i].priority = table->priority;
1129 ++i;
1130 }
1131
1132 ret = nf_register_hooks(ops, num_hooks);
1133 if (ret < 0) {
1134 kfree(ops);
1135 return ERR_PTR(ret);
1136 }
1137
1138 return ops;
1139}
1140EXPORT_SYMBOL_GPL(xt_hook_link);
1141
1142/**
1143 * xt_hook_unlink - remove hooks for a table
1144 * @ops: nf_hook_ops array as returned by nf_hook_link
1145 * @hook_mask: the very same mask that was passed to nf_hook_link
1146 */
1147void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops)
1148{
1149 nf_unregister_hooks(ops, hweight32(table->valid_hooks));
1150 kfree(ops);
1151}
1152EXPORT_SYMBOL_GPL(xt_hook_unlink);
1153
1094int xt_proto_init(struct net *net, u_int8_t af) 1154int xt_proto_init(struct net *net, u_int8_t af)
1095{ 1155{
1096#ifdef CONFIG_PROC_FS 1156#ifdef CONFIG_PROC_FS
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
new file mode 100644
index 000000000000..61c50fa84703
--- /dev/null
+++ b/net/netfilter/xt_CT.c
@@ -0,0 +1,164 @@
1/*
2 * Copyright (c) 2010 Patrick McHardy <kaber@trash.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/module.h>
10#include <linux/skbuff.h>
11#include <linux/selinux.h>
12#include <linux/netfilter_ipv4/ip_tables.h>
13#include <linux/netfilter_ipv6/ip6_tables.h>
14#include <linux/netfilter/x_tables.h>
15#include <linux/netfilter/xt_CT.h>
16#include <net/netfilter/nf_conntrack.h>
17#include <net/netfilter/nf_conntrack_helper.h>
18#include <net/netfilter/nf_conntrack_ecache.h>
19#include <net/netfilter/nf_conntrack_zones.h>
20
21static unsigned int xt_ct_target(struct sk_buff *skb,
22 const struct xt_target_param *par)
23{
24 const struct xt_ct_target_info *info = par->targinfo;
25 struct nf_conn *ct = info->ct;
26
27 /* Previously seen (loopback)? Ignore. */
28 if (skb->nfct != NULL)
29 return XT_CONTINUE;
30
31 atomic_inc(&ct->ct_general.use);
32 skb->nfct = &ct->ct_general;
33 skb->nfctinfo = IP_CT_NEW;
34
35 return XT_CONTINUE;
36}
37
38static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
39{
40 if (par->family == AF_INET) {
41 const struct ipt_entry *e = par->entryinfo;
42
43 if (e->ip.invflags & IPT_INV_PROTO)
44 return 0;
45 return e->ip.proto;
46 } else if (par->family == AF_INET6) {
47 const struct ip6t_entry *e = par->entryinfo;
48
49 if (e->ipv6.invflags & IP6T_INV_PROTO)
50 return 0;
51 return e->ipv6.proto;
52 } else
53 return 0;
54}
55
56static bool xt_ct_tg_check(const struct xt_tgchk_param *par)
57{
58 struct xt_ct_target_info *info = par->targinfo;
59 struct nf_conntrack_tuple t;
60 struct nf_conn_help *help;
61 struct nf_conn *ct;
62 u8 proto;
63
64 if (info->flags & ~XT_CT_NOTRACK)
65 return false;
66
67 if (info->flags & XT_CT_NOTRACK) {
68 ct = &nf_conntrack_untracked;
69 atomic_inc(&ct->ct_general.use);
70 goto out;
71 }
72
73#ifndef CONFIG_NF_CONNTRACK_ZONES
74 if (info->zone)
75 goto err1;
76#endif
77
78 if (nf_ct_l3proto_try_module_get(par->family) < 0)
79 goto err1;
80
81 memset(&t, 0, sizeof(t));
82 ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL);
83 if (IS_ERR(ct))
84 goto err2;
85
86 if ((info->ct_events || info->exp_events) &&
87 !nf_ct_ecache_ext_add(ct, info->ct_events, info->exp_events,
88 GFP_KERNEL))
89 goto err3;
90
91 if (info->helper[0]) {
92 proto = xt_ct_find_proto(par);
93 if (!proto)
94 goto err3;
95
96 help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
97 if (help == NULL)
98 goto err3;
99
100 help->helper = nf_conntrack_helper_try_module_get(info->helper,
101 par->family,
102 proto);
103 if (help->helper == NULL)
104 goto err3;
105 }
106
107 __set_bit(IPS_TEMPLATE_BIT, &ct->status);
108 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
109out:
110 info->ct = ct;
111 return true;
112
113err3:
114 nf_conntrack_free(ct);
115err2:
116 nf_ct_l3proto_module_put(par->family);
117err1:
118 return false;
119}
120
121static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par)
122{
123 struct xt_ct_target_info *info = par->targinfo;
124 struct nf_conn *ct = info->ct;
125 struct nf_conn_help *help;
126
127 if (ct != &nf_conntrack_untracked) {
128 help = nfct_help(ct);
129 if (help)
130 module_put(help->helper->me);
131
132 nf_ct_l3proto_module_put(par->family);
133 }
134 nf_ct_put(info->ct);
135}
136
137static struct xt_target xt_ct_tg __read_mostly = {
138 .name = "CT",
139 .family = NFPROTO_UNSPEC,
140 .targetsize = XT_ALIGN(sizeof(struct xt_ct_target_info)),
141 .checkentry = xt_ct_tg_check,
142 .destroy = xt_ct_tg_destroy,
143 .target = xt_ct_target,
144 .table = "raw",
145 .me = THIS_MODULE,
146};
147
148static int __init xt_ct_tg_init(void)
149{
150 return xt_register_target(&xt_ct_tg);
151}
152
153static void __exit xt_ct_tg_exit(void)
154{
155 xt_unregister_target(&xt_ct_tg);
156}
157
158module_init(xt_ct_tg_init);
159module_exit(xt_ct_tg_exit);
160
161MODULE_LICENSE("GPL");
162MODULE_DESCRIPTION("Xtables: connection tracking target");
163MODULE_ALIAS("ipt_CT");
164MODULE_ALIAS("ip6t_CT");
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index f28f6a5fc02d..12dcd7007c3e 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -28,6 +28,7 @@ MODULE_ALIAS("ip6t_NFQUEUE");
28MODULE_ALIAS("arpt_NFQUEUE"); 28MODULE_ALIAS("arpt_NFQUEUE");
29 29
30static u32 jhash_initval __read_mostly; 30static u32 jhash_initval __read_mostly;
31static bool rnd_inited __read_mostly;
31 32
32static unsigned int 33static unsigned int
33nfqueue_tg(struct sk_buff *skb, const struct xt_target_param *par) 34nfqueue_tg(struct sk_buff *skb, const struct xt_target_param *par)
@@ -90,6 +91,10 @@ static bool nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
90 const struct xt_NFQ_info_v1 *info = par->targinfo; 91 const struct xt_NFQ_info_v1 *info = par->targinfo;
91 u32 maxid; 92 u32 maxid;
92 93
94 if (unlikely(!rnd_inited)) {
95 get_random_bytes(&jhash_initval, sizeof(jhash_initval));
96 rnd_inited = true;
97 }
93 if (info->queues_total == 0) { 98 if (info->queues_total == 0) {
94 pr_err("NFQUEUE: number of total queues is 0\n"); 99 pr_err("NFQUEUE: number of total queues is 0\n");
95 return false; 100 return false;
@@ -135,7 +140,6 @@ static struct xt_target nfqueue_tg_reg[] __read_mostly = {
135 140
136static int __init nfqueue_tg_init(void) 141static int __init nfqueue_tg_init(void)
137{ 142{
138 get_random_bytes(&jhash_initval, sizeof(jhash_initval));
139 return xt_register_targets(nfqueue_tg_reg, ARRAY_SIZE(nfqueue_tg_reg)); 143 return xt_register_targets(nfqueue_tg_reg, ARRAY_SIZE(nfqueue_tg_reg));
140} 144}
141 145
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index d80b8192e0d4..87ae97e5516f 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -23,6 +23,7 @@ static DEFINE_MUTEX(xt_rateest_mutex);
23#define RATEEST_HSIZE 16 23#define RATEEST_HSIZE 16
24static struct hlist_head rateest_hash[RATEEST_HSIZE] __read_mostly; 24static struct hlist_head rateest_hash[RATEEST_HSIZE] __read_mostly;
25static unsigned int jhash_rnd __read_mostly; 25static unsigned int jhash_rnd __read_mostly;
26static bool rnd_inited __read_mostly;
26 27
27static unsigned int xt_rateest_hash(const char *name) 28static unsigned int xt_rateest_hash(const char *name)
28{ 29{
@@ -93,6 +94,11 @@ static bool xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
93 struct gnet_estimator est; 94 struct gnet_estimator est;
94 } cfg; 95 } cfg;
95 96
97 if (unlikely(!rnd_inited)) {
98 get_random_bytes(&jhash_rnd, sizeof(jhash_rnd));
99 rnd_inited = true;
100 }
101
96 est = xt_rateest_lookup(info->name); 102 est = xt_rateest_lookup(info->name);
97 if (est) { 103 if (est) {
98 /* 104 /*
@@ -164,7 +170,6 @@ static int __init xt_rateest_tg_init(void)
164 for (i = 0; i < ARRAY_SIZE(rateest_hash); i++) 170 for (i = 0; i < ARRAY_SIZE(rateest_hash); i++)
165 INIT_HLIST_HEAD(&rateest_hash[i]); 171 INIT_HLIST_HEAD(&rateest_hash[i]);
166 172
167 get_random_bytes(&jhash_rnd, sizeof(jhash_rnd));
168 return xt_register_target(&xt_rateest_tg_reg); 173 return xt_register_target(&xt_rateest_tg_reg);
169} 174}
170 175
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index eda64c1cb1e5..0e357ac9a2a8 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -60,17 +60,9 @@ tcpmss_mangle_packet(struct sk_buff *skb,
60 tcplen = skb->len - tcphoff; 60 tcplen = skb->len - tcphoff;
61 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); 61 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
62 62
63 /* Since it passed flags test in tcp match, we know it is is 63 /* Header cannot be larger than the packet */
64 not a fragment, and has data >= tcp header length. SYN 64 if (tcplen < tcph->doff*4)
65 packets should not contain data: if they did, then we risk
66 running over MTU, sending Frag Needed and breaking things
67 badly. --RR */
68 if (tcplen != tcph->doff*4) {
69 if (net_ratelimit())
70 printk(KERN_ERR "xt_TCPMSS: bad length (%u bytes)\n",
71 skb->len);
72 return -1; 65 return -1;
73 }
74 66
75 if (info->mss == XT_TCPMSS_CLAMP_PMTU) { 67 if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
76 if (dst_mtu(skb_dst(skb)) <= minlen) { 68 if (dst_mtu(skb_dst(skb)) <= minlen) {
@@ -115,6 +107,12 @@ tcpmss_mangle_packet(struct sk_buff *skb,
115 } 107 }
116 } 108 }
117 109
110 /* There is data after the header so the option can't be added
111 without moving it, and doing so may make the SYN packet
112 itself too large. Accept the packet unmodified instead. */
113 if (tcplen > tcph->doff*4)
114 return 0;
115
118 /* 116 /*
119 * MSS Option not found ?! add it.. 117 * MSS Option not found ?! add it..
120 */ 118 */
@@ -241,6 +239,7 @@ static bool tcpmss_tg4_check(const struct xt_tgchk_param *par)
241{ 239{
242 const struct xt_tcpmss_info *info = par->targinfo; 240 const struct xt_tcpmss_info *info = par->targinfo;
243 const struct ipt_entry *e = par->entryinfo; 241 const struct ipt_entry *e = par->entryinfo;
242 const struct xt_entry_match *ematch;
244 243
245 if (info->mss == XT_TCPMSS_CLAMP_PMTU && 244 if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
246 (par->hook_mask & ~((1 << NF_INET_FORWARD) | 245 (par->hook_mask & ~((1 << NF_INET_FORWARD) |
@@ -250,8 +249,9 @@ static bool tcpmss_tg4_check(const struct xt_tgchk_param *par)
250 "FORWARD, OUTPUT and POSTROUTING hooks\n"); 249 "FORWARD, OUTPUT and POSTROUTING hooks\n");
251 return false; 250 return false;
252 } 251 }
253 if (IPT_MATCH_ITERATE(e, find_syn_match)) 252 xt_ematch_foreach(ematch, e)
254 return true; 253 if (find_syn_match(ematch))
254 return true;
255 printk("xt_TCPMSS: Only works on TCP SYN packets\n"); 255 printk("xt_TCPMSS: Only works on TCP SYN packets\n");
256 return false; 256 return false;
257} 257}
@@ -261,6 +261,7 @@ static bool tcpmss_tg6_check(const struct xt_tgchk_param *par)
261{ 261{
262 const struct xt_tcpmss_info *info = par->targinfo; 262 const struct xt_tcpmss_info *info = par->targinfo;
263 const struct ip6t_entry *e = par->entryinfo; 263 const struct ip6t_entry *e = par->entryinfo;
264 const struct xt_entry_match *ematch;
264 265
265 if (info->mss == XT_TCPMSS_CLAMP_PMTU && 266 if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
266 (par->hook_mask & ~((1 << NF_INET_FORWARD) | 267 (par->hook_mask & ~((1 << NF_INET_FORWARD) |
@@ -270,8 +271,9 @@ static bool tcpmss_tg6_check(const struct xt_tgchk_param *par)
270 "FORWARD, OUTPUT and POSTROUTING hooks\n"); 271 "FORWARD, OUTPUT and POSTROUTING hooks\n");
271 return false; 272 return false;
272 } 273 }
273 if (IP6T_MATCH_ITERATE(e, find_syn_match)) 274 xt_ematch_foreach(ematch, e)
274 return true; 275 if (find_syn_match(ematch))
276 return true;
275 printk("xt_TCPMSS: Only works on TCP SYN packets\n"); 277 printk("xt_TCPMSS: Only works on TCP SYN packets\n");
276 return false; 278 return false;
277} 279}
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 38f03f75a636..26997ce90e48 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -28,6 +28,7 @@
28#include <net/netfilter/nf_conntrack.h> 28#include <net/netfilter/nf_conntrack.h>
29#include <net/netfilter/nf_conntrack_core.h> 29#include <net/netfilter/nf_conntrack_core.h>
30#include <net/netfilter/nf_conntrack_tuple.h> 30#include <net/netfilter/nf_conntrack_tuple.h>
31#include <net/netfilter/nf_conntrack_zones.h>
31 32
32/* we will save the tuples of all connections we care about */ 33/* we will save the tuples of all connections we care about */
33struct xt_connlimit_conn { 34struct xt_connlimit_conn {
@@ -40,15 +41,11 @@ struct xt_connlimit_data {
40 spinlock_t lock; 41 spinlock_t lock;
41}; 42};
42 43
43static u_int32_t connlimit_rnd; 44static u_int32_t connlimit_rnd __read_mostly;
44static bool connlimit_rnd_inited; 45static bool connlimit_rnd_inited __read_mostly;
45 46
46static inline unsigned int connlimit_iphash(__be32 addr) 47static inline unsigned int connlimit_iphash(__be32 addr)
47{ 48{
48 if (unlikely(!connlimit_rnd_inited)) {
49 get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
50 connlimit_rnd_inited = true;
51 }
52 return jhash_1word((__force __u32)addr, connlimit_rnd) & 0xFF; 49 return jhash_1word((__force __u32)addr, connlimit_rnd) & 0xFF;
53} 50}
54 51
@@ -59,11 +56,6 @@ connlimit_iphash6(const union nf_inet_addr *addr,
59 union nf_inet_addr res; 56 union nf_inet_addr res;
60 unsigned int i; 57 unsigned int i;
61 58
62 if (unlikely(!connlimit_rnd_inited)) {
63 get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
64 connlimit_rnd_inited = true;
65 }
66
67 for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i) 59 for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i)
68 res.ip6[i] = addr->ip6[i] & mask->ip6[i]; 60 res.ip6[i] = addr->ip6[i] & mask->ip6[i];
69 61
@@ -99,7 +91,8 @@ same_source_net(const union nf_inet_addr *addr,
99 } 91 }
100} 92}
101 93
102static int count_them(struct xt_connlimit_data *data, 94static int count_them(struct net *net,
95 struct xt_connlimit_data *data,
103 const struct nf_conntrack_tuple *tuple, 96 const struct nf_conntrack_tuple *tuple,
104 const union nf_inet_addr *addr, 97 const union nf_inet_addr *addr,
105 const union nf_inet_addr *mask, 98 const union nf_inet_addr *mask,
@@ -122,7 +115,8 @@ static int count_them(struct xt_connlimit_data *data,
122 115
123 /* check the saved connections */ 116 /* check the saved connections */
124 list_for_each_entry_safe(conn, tmp, hash, list) { 117 list_for_each_entry_safe(conn, tmp, hash, list) {
125 found = nf_conntrack_find_get(&init_net, &conn->tuple); 118 found = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE,
119 &conn->tuple);
126 found_ct = NULL; 120 found_ct = NULL;
127 121
128 if (found != NULL) 122 if (found != NULL)
@@ -180,6 +174,7 @@ static int count_them(struct xt_connlimit_data *data,
180static bool 174static bool
181connlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par) 175connlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
182{ 176{
177 struct net *net = dev_net(par->in ? par->in : par->out);
183 const struct xt_connlimit_info *info = par->matchinfo; 178 const struct xt_connlimit_info *info = par->matchinfo;
184 union nf_inet_addr addr; 179 union nf_inet_addr addr;
185 struct nf_conntrack_tuple tuple; 180 struct nf_conntrack_tuple tuple;
@@ -204,7 +199,7 @@ connlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
204 } 199 }
205 200
206 spin_lock_bh(&info->data->lock); 201 spin_lock_bh(&info->data->lock);
207 connections = count_them(info->data, tuple_ptr, &addr, 202 connections = count_them(net, info->data, tuple_ptr, &addr,
208 &info->mask, par->family); 203 &info->mask, par->family);
209 spin_unlock_bh(&info->data->lock); 204 spin_unlock_bh(&info->data->lock);
210 205
@@ -226,6 +221,10 @@ static bool connlimit_mt_check(const struct xt_mtchk_param *par)
226 struct xt_connlimit_info *info = par->matchinfo; 221 struct xt_connlimit_info *info = par->matchinfo;
227 unsigned int i; 222 unsigned int i;
228 223
224 if (unlikely(!connlimit_rnd_inited)) {
225 get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
226 connlimit_rnd_inited = true;
227 }
229 if (nf_ct_l3proto_try_module_get(par->family) < 0) { 228 if (nf_ct_l3proto_try_module_get(par->family) < 0) {
230 printk(KERN_WARNING "cannot load conntrack support for " 229 printk(KERN_WARNING "cannot load conntrack support for "
231 "address family %u\n", par->family); 230 "address family %u\n", par->family);
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index dd16e404424f..215a64835de8 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * xt_hashlimit - Netfilter module to limit the number of packets per time 2 * xt_hashlimit - Netfilter module to limit the number of packets per time
3 * seperately for each hashbucket (sourceip/sourceport/dstip/dstport) 3 * separately for each hashbucket (sourceip/sourceport/dstip/dstport)
4 * 4 *
5 * (C) 2003-2004 by Harald Welte <laforge@netfilter.org> 5 * (C) 2003-2004 by Harald Welte <laforge@netfilter.org>
6 * Copyright © CC Computer Consultants GmbH, 2007 - 2008 6 * Copyright © CC Computer Consultants GmbH, 2007 - 2008
@@ -26,6 +26,7 @@
26#endif 26#endif
27 27
28#include <net/net_namespace.h> 28#include <net/net_namespace.h>
29#include <net/netns/generic.h>
29 30
30#include <linux/netfilter/x_tables.h> 31#include <linux/netfilter/x_tables.h>
31#include <linux/netfilter_ipv4/ip_tables.h> 32#include <linux/netfilter_ipv4/ip_tables.h>
@@ -40,9 +41,19 @@ MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
40MODULE_ALIAS("ipt_hashlimit"); 41MODULE_ALIAS("ipt_hashlimit");
41MODULE_ALIAS("ip6t_hashlimit"); 42MODULE_ALIAS("ip6t_hashlimit");
42 43
44struct hashlimit_net {
45 struct hlist_head htables;
46 struct proc_dir_entry *ipt_hashlimit;
47 struct proc_dir_entry *ip6t_hashlimit;
48};
49
50static int hashlimit_net_id;
51static inline struct hashlimit_net *hashlimit_pernet(struct net *net)
52{
53 return net_generic(net, hashlimit_net_id);
54}
55
43/* need to declare this at the top */ 56/* need to declare this at the top */
44static struct proc_dir_entry *hashlimit_procdir4;
45static struct proc_dir_entry *hashlimit_procdir6;
46static const struct file_operations dl_file_ops; 57static const struct file_operations dl_file_ops;
47 58
48/* hash table crap */ 59/* hash table crap */
@@ -79,27 +90,26 @@ struct dsthash_ent {
79 90
80struct xt_hashlimit_htable { 91struct xt_hashlimit_htable {
81 struct hlist_node node; /* global list of all htables */ 92 struct hlist_node node; /* global list of all htables */
82 atomic_t use; 93 int use;
83 u_int8_t family; 94 u_int8_t family;
95 bool rnd_initialized;
84 96
85 struct hashlimit_cfg1 cfg; /* config */ 97 struct hashlimit_cfg1 cfg; /* config */
86 98
87 /* used internally */ 99 /* used internally */
88 spinlock_t lock; /* lock for list_head */ 100 spinlock_t lock; /* lock for list_head */
89 u_int32_t rnd; /* random seed for hash */ 101 u_int32_t rnd; /* random seed for hash */
90 int rnd_initialized;
91 unsigned int count; /* number entries in table */ 102 unsigned int count; /* number entries in table */
92 struct timer_list timer; /* timer for gc */ 103 struct timer_list timer; /* timer for gc */
93 104
94 /* seq_file stuff */ 105 /* seq_file stuff */
95 struct proc_dir_entry *pde; 106 struct proc_dir_entry *pde;
107 struct net *net;
96 108
97 struct hlist_head hash[0]; /* hashtable itself */ 109 struct hlist_head hash[0]; /* hashtable itself */
98}; 110};
99 111
100static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */ 112static DEFINE_MUTEX(hashlimit_mutex); /* protects htables list */
101static DEFINE_MUTEX(hlimit_mutex); /* additional checkentry protection */
102static HLIST_HEAD(hashlimit_htables);
103static struct kmem_cache *hashlimit_cachep __read_mostly; 113static struct kmem_cache *hashlimit_cachep __read_mostly;
104 114
105static inline bool dst_cmp(const struct dsthash_ent *ent, 115static inline bool dst_cmp(const struct dsthash_ent *ent,
@@ -150,7 +160,7 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht,
150 * the first hashtable entry */ 160 * the first hashtable entry */
151 if (!ht->rnd_initialized) { 161 if (!ht->rnd_initialized) {
152 get_random_bytes(&ht->rnd, sizeof(ht->rnd)); 162 get_random_bytes(&ht->rnd, sizeof(ht->rnd));
153 ht->rnd_initialized = 1; 163 ht->rnd_initialized = true;
154 } 164 }
155 165
156 if (ht->cfg.max && ht->count >= ht->cfg.max) { 166 if (ht->cfg.max && ht->count >= ht->cfg.max) {
@@ -185,8 +195,9 @@ dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
185} 195}
186static void htable_gc(unsigned long htlong); 196static void htable_gc(unsigned long htlong);
187 197
188static int htable_create_v0(struct xt_hashlimit_info *minfo, u_int8_t family) 198static int htable_create_v0(struct net *net, struct xt_hashlimit_info *minfo, u_int8_t family)
189{ 199{
200 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
190 struct xt_hashlimit_htable *hinfo; 201 struct xt_hashlimit_htable *hinfo;
191 unsigned int size; 202 unsigned int size;
192 unsigned int i; 203 unsigned int i;
@@ -232,33 +243,34 @@ static int htable_create_v0(struct xt_hashlimit_info *minfo, u_int8_t family)
232 for (i = 0; i < hinfo->cfg.size; i++) 243 for (i = 0; i < hinfo->cfg.size; i++)
233 INIT_HLIST_HEAD(&hinfo->hash[i]); 244 INIT_HLIST_HEAD(&hinfo->hash[i]);
234 245
235 atomic_set(&hinfo->use, 1); 246 hinfo->use = 1;
236 hinfo->count = 0; 247 hinfo->count = 0;
237 hinfo->family = family; 248 hinfo->family = family;
238 hinfo->rnd_initialized = 0; 249 hinfo->rnd_initialized = false;
239 spin_lock_init(&hinfo->lock); 250 spin_lock_init(&hinfo->lock);
240 hinfo->pde = proc_create_data(minfo->name, 0, 251 hinfo->pde = proc_create_data(minfo->name, 0,
241 (family == NFPROTO_IPV4) ? 252 (family == NFPROTO_IPV4) ?
242 hashlimit_procdir4 : hashlimit_procdir6, 253 hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit,
243 &dl_file_ops, hinfo); 254 &dl_file_ops, hinfo);
244 if (!hinfo->pde) { 255 if (!hinfo->pde) {
245 vfree(hinfo); 256 vfree(hinfo);
246 return -1; 257 return -1;
247 } 258 }
259 hinfo->net = net;
248 260
249 setup_timer(&hinfo->timer, htable_gc, (unsigned long )hinfo); 261 setup_timer(&hinfo->timer, htable_gc, (unsigned long )hinfo);
250 hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval); 262 hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
251 add_timer(&hinfo->timer); 263 add_timer(&hinfo->timer);
252 264
253 spin_lock_bh(&hashlimit_lock); 265 hlist_add_head(&hinfo->node, &hashlimit_net->htables);
254 hlist_add_head(&hinfo->node, &hashlimit_htables);
255 spin_unlock_bh(&hashlimit_lock);
256 266
257 return 0; 267 return 0;
258} 268}
259 269
260static int htable_create(struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family) 270static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
271 u_int8_t family)
261{ 272{
273 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
262 struct xt_hashlimit_htable *hinfo; 274 struct xt_hashlimit_htable *hinfo;
263 unsigned int size; 275 unsigned int size;
264 unsigned int i; 276 unsigned int i;
@@ -293,28 +305,27 @@ static int htable_create(struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family)
293 for (i = 0; i < hinfo->cfg.size; i++) 305 for (i = 0; i < hinfo->cfg.size; i++)
294 INIT_HLIST_HEAD(&hinfo->hash[i]); 306 INIT_HLIST_HEAD(&hinfo->hash[i]);
295 307
296 atomic_set(&hinfo->use, 1); 308 hinfo->use = 1;
297 hinfo->count = 0; 309 hinfo->count = 0;
298 hinfo->family = family; 310 hinfo->family = family;
299 hinfo->rnd_initialized = 0; 311 hinfo->rnd_initialized = false;
300 spin_lock_init(&hinfo->lock); 312 spin_lock_init(&hinfo->lock);
301 313
302 hinfo->pde = proc_create_data(minfo->name, 0, 314 hinfo->pde = proc_create_data(minfo->name, 0,
303 (family == NFPROTO_IPV4) ? 315 (family == NFPROTO_IPV4) ?
304 hashlimit_procdir4 : hashlimit_procdir6, 316 hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit,
305 &dl_file_ops, hinfo); 317 &dl_file_ops, hinfo);
306 if (hinfo->pde == NULL) { 318 if (hinfo->pde == NULL) {
307 vfree(hinfo); 319 vfree(hinfo);
308 return -1; 320 return -1;
309 } 321 }
322 hinfo->net = net;
310 323
311 setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo); 324 setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo);
312 hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval); 325 hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
313 add_timer(&hinfo->timer); 326 add_timer(&hinfo->timer);
314 327
315 spin_lock_bh(&hashlimit_lock); 328 hlist_add_head(&hinfo->node, &hashlimit_net->htables);
316 hlist_add_head(&hinfo->node, &hashlimit_htables);
317 spin_unlock_bh(&hashlimit_lock);
318 329
319 return 0; 330 return 0;
320} 331}
@@ -364,43 +375,46 @@ static void htable_gc(unsigned long htlong)
364 375
365static void htable_destroy(struct xt_hashlimit_htable *hinfo) 376static void htable_destroy(struct xt_hashlimit_htable *hinfo)
366{ 377{
378 struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net);
379 struct proc_dir_entry *parent;
380
367 del_timer_sync(&hinfo->timer); 381 del_timer_sync(&hinfo->timer);
368 382
369 /* remove proc entry */ 383 if (hinfo->family == NFPROTO_IPV4)
370 remove_proc_entry(hinfo->pde->name, 384 parent = hashlimit_net->ipt_hashlimit;
371 hinfo->family == NFPROTO_IPV4 ? hashlimit_procdir4 : 385 else
372 hashlimit_procdir6); 386 parent = hashlimit_net->ip6t_hashlimit;
387 remove_proc_entry(hinfo->pde->name, parent);
373 htable_selective_cleanup(hinfo, select_all); 388 htable_selective_cleanup(hinfo, select_all);
374 vfree(hinfo); 389 vfree(hinfo);
375} 390}
376 391
377static struct xt_hashlimit_htable *htable_find_get(const char *name, 392static struct xt_hashlimit_htable *htable_find_get(struct net *net,
393 const char *name,
378 u_int8_t family) 394 u_int8_t family)
379{ 395{
396 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
380 struct xt_hashlimit_htable *hinfo; 397 struct xt_hashlimit_htable *hinfo;
381 struct hlist_node *pos; 398 struct hlist_node *pos;
382 399
383 spin_lock_bh(&hashlimit_lock); 400 hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node) {
384 hlist_for_each_entry(hinfo, pos, &hashlimit_htables, node) {
385 if (!strcmp(name, hinfo->pde->name) && 401 if (!strcmp(name, hinfo->pde->name) &&
386 hinfo->family == family) { 402 hinfo->family == family) {
387 atomic_inc(&hinfo->use); 403 hinfo->use++;
388 spin_unlock_bh(&hashlimit_lock);
389 return hinfo; 404 return hinfo;
390 } 405 }
391 } 406 }
392 spin_unlock_bh(&hashlimit_lock);
393 return NULL; 407 return NULL;
394} 408}
395 409
396static void htable_put(struct xt_hashlimit_htable *hinfo) 410static void htable_put(struct xt_hashlimit_htable *hinfo)
397{ 411{
398 if (atomic_dec_and_test(&hinfo->use)) { 412 mutex_lock(&hashlimit_mutex);
399 spin_lock_bh(&hashlimit_lock); 413 if (--hinfo->use == 0) {
400 hlist_del(&hinfo->node); 414 hlist_del(&hinfo->node);
401 spin_unlock_bh(&hashlimit_lock);
402 htable_destroy(hinfo); 415 htable_destroy(hinfo);
403 } 416 }
417 mutex_unlock(&hashlimit_mutex);
404} 418}
405 419
406/* The algorithm used is the Simple Token Bucket Filter (TBF) 420/* The algorithm used is the Simple Token Bucket Filter (TBF)
@@ -479,6 +493,7 @@ static void hashlimit_ipv6_mask(__be32 *i, unsigned int p)
479 case 64 ... 95: 493 case 64 ... 95:
480 i[2] = maskl(i[2], p - 64); 494 i[2] = maskl(i[2], p - 64);
481 i[3] = 0; 495 i[3] = 0;
496 break;
482 case 96 ... 127: 497 case 96 ... 127:
483 i[3] = maskl(i[3], p - 96); 498 i[3] = maskl(i[3], p - 96);
484 break; 499 break;
@@ -665,6 +680,7 @@ hashlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
665 680
666static bool hashlimit_mt_check_v0(const struct xt_mtchk_param *par) 681static bool hashlimit_mt_check_v0(const struct xt_mtchk_param *par)
667{ 682{
683 struct net *net = par->net;
668 struct xt_hashlimit_info *r = par->matchinfo; 684 struct xt_hashlimit_info *r = par->matchinfo;
669 685
670 /* Check for overflow. */ 686 /* Check for overflow. */
@@ -687,25 +703,20 @@ static bool hashlimit_mt_check_v0(const struct xt_mtchk_param *par)
687 if (r->name[sizeof(r->name) - 1] != '\0') 703 if (r->name[sizeof(r->name) - 1] != '\0')
688 return false; 704 return false;
689 705
690 /* This is the best we've got: We cannot release and re-grab lock, 706 mutex_lock(&hashlimit_mutex);
691 * since checkentry() is called before x_tables.c grabs xt_mutex. 707 r->hinfo = htable_find_get(net, r->name, par->match->family);
692 * We also cannot grab the hashtable spinlock, since htable_create will 708 if (!r->hinfo && htable_create_v0(net, r, par->match->family) != 0) {
693 * call vmalloc, and that can sleep. And we cannot just re-search 709 mutex_unlock(&hashlimit_mutex);
694 * the list of htable's in htable_create(), since then we would
695 * create duplicate proc files. -HW */
696 mutex_lock(&hlimit_mutex);
697 r->hinfo = htable_find_get(r->name, par->match->family);
698 if (!r->hinfo && htable_create_v0(r, par->match->family) != 0) {
699 mutex_unlock(&hlimit_mutex);
700 return false; 710 return false;
701 } 711 }
702 mutex_unlock(&hlimit_mutex); 712 mutex_unlock(&hashlimit_mutex);
703 713
704 return true; 714 return true;
705} 715}
706 716
707static bool hashlimit_mt_check(const struct xt_mtchk_param *par) 717static bool hashlimit_mt_check(const struct xt_mtchk_param *par)
708{ 718{
719 struct net *net = par->net;
709 struct xt_hashlimit_mtinfo1 *info = par->matchinfo; 720 struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
710 721
711 /* Check for overflow. */ 722 /* Check for overflow. */
@@ -728,19 +739,13 @@ static bool hashlimit_mt_check(const struct xt_mtchk_param *par)
728 return false; 739 return false;
729 } 740 }
730 741
731 /* This is the best we've got: We cannot release and re-grab lock, 742 mutex_lock(&hashlimit_mutex);
732 * since checkentry() is called before x_tables.c grabs xt_mutex. 743 info->hinfo = htable_find_get(net, info->name, par->match->family);
733 * We also cannot grab the hashtable spinlock, since htable_create will 744 if (!info->hinfo && htable_create(net, info, par->match->family) != 0) {
734 * call vmalloc, and that can sleep. And we cannot just re-search 745 mutex_unlock(&hashlimit_mutex);
735 * the list of htable's in htable_create(), since then we would
736 * create duplicate proc files. -HW */
737 mutex_lock(&hlimit_mutex);
738 info->hinfo = htable_find_get(info->name, par->match->family);
739 if (!info->hinfo && htable_create(info, par->match->family) != 0) {
740 mutex_unlock(&hlimit_mutex);
741 return false; 746 return false;
742 } 747 }
743 mutex_unlock(&hlimit_mutex); 748 mutex_unlock(&hashlimit_mutex);
744 return true; 749 return true;
745} 750}
746 751
@@ -767,7 +772,7 @@ struct compat_xt_hashlimit_info {
767 compat_uptr_t master; 772 compat_uptr_t master;
768}; 773};
769 774
770static void hashlimit_mt_compat_from_user(void *dst, void *src) 775static void hashlimit_mt_compat_from_user(void *dst, const void *src)
771{ 776{
772 int off = offsetof(struct compat_xt_hashlimit_info, hinfo); 777 int off = offsetof(struct compat_xt_hashlimit_info, hinfo);
773 778
@@ -775,7 +780,7 @@ static void hashlimit_mt_compat_from_user(void *dst, void *src)
775 memset(dst + off, 0, sizeof(struct compat_xt_hashlimit_info) - off); 780 memset(dst + off, 0, sizeof(struct compat_xt_hashlimit_info) - off);
776} 781}
777 782
778static int hashlimit_mt_compat_to_user(void __user *dst, void *src) 783static int hashlimit_mt_compat_to_user(void __user *dst, const void *src)
779{ 784{
780 int off = offsetof(struct compat_xt_hashlimit_info, hinfo); 785 int off = offsetof(struct compat_xt_hashlimit_info, hinfo);
781 786
@@ -841,8 +846,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
841static void *dl_seq_start(struct seq_file *s, loff_t *pos) 846static void *dl_seq_start(struct seq_file *s, loff_t *pos)
842 __acquires(htable->lock) 847 __acquires(htable->lock)
843{ 848{
844 struct proc_dir_entry *pde = s->private; 849 struct xt_hashlimit_htable *htable = s->private;
845 struct xt_hashlimit_htable *htable = pde->data;
846 unsigned int *bucket; 850 unsigned int *bucket;
847 851
848 spin_lock_bh(&htable->lock); 852 spin_lock_bh(&htable->lock);
@@ -859,8 +863,7 @@ static void *dl_seq_start(struct seq_file *s, loff_t *pos)
859 863
860static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos) 864static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
861{ 865{
862 struct proc_dir_entry *pde = s->private; 866 struct xt_hashlimit_htable *htable = s->private;
863 struct xt_hashlimit_htable *htable = pde->data;
864 unsigned int *bucket = (unsigned int *)v; 867 unsigned int *bucket = (unsigned int *)v;
865 868
866 *pos = ++(*bucket); 869 *pos = ++(*bucket);
@@ -874,11 +877,11 @@ static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
874static void dl_seq_stop(struct seq_file *s, void *v) 877static void dl_seq_stop(struct seq_file *s, void *v)
875 __releases(htable->lock) 878 __releases(htable->lock)
876{ 879{
877 struct proc_dir_entry *pde = s->private; 880 struct xt_hashlimit_htable *htable = s->private;
878 struct xt_hashlimit_htable *htable = pde->data;
879 unsigned int *bucket = (unsigned int *)v; 881 unsigned int *bucket = (unsigned int *)v;
880 882
881 kfree(bucket); 883 if (!IS_ERR(bucket))
884 kfree(bucket);
882 spin_unlock_bh(&htable->lock); 885 spin_unlock_bh(&htable->lock);
883} 886}
884 887
@@ -917,8 +920,7 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
917 920
918static int dl_seq_show(struct seq_file *s, void *v) 921static int dl_seq_show(struct seq_file *s, void *v)
919{ 922{
920 struct proc_dir_entry *pde = s->private; 923 struct xt_hashlimit_htable *htable = s->private;
921 struct xt_hashlimit_htable *htable = pde->data;
922 unsigned int *bucket = (unsigned int *)v; 924 unsigned int *bucket = (unsigned int *)v;
923 struct dsthash_ent *ent; 925 struct dsthash_ent *ent;
924 struct hlist_node *pos; 926 struct hlist_node *pos;
@@ -944,7 +946,7 @@ static int dl_proc_open(struct inode *inode, struct file *file)
944 946
945 if (!ret) { 947 if (!ret) {
946 struct seq_file *sf = file->private_data; 948 struct seq_file *sf = file->private_data;
947 sf->private = PDE(inode); 949 sf->private = PDE(inode)->data;
948 } 950 }
949 return ret; 951 return ret;
950} 952}
@@ -957,10 +959,61 @@ static const struct file_operations dl_file_ops = {
957 .release = seq_release 959 .release = seq_release
958}; 960};
959 961
962static int __net_init hashlimit_proc_net_init(struct net *net)
963{
964 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
965
966 hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net);
967 if (!hashlimit_net->ipt_hashlimit)
968 return -ENOMEM;
969#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
970 hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net);
971 if (!hashlimit_net->ip6t_hashlimit) {
972 proc_net_remove(net, "ipt_hashlimit");
973 return -ENOMEM;
974 }
975#endif
976 return 0;
977}
978
979static void __net_exit hashlimit_proc_net_exit(struct net *net)
980{
981 proc_net_remove(net, "ipt_hashlimit");
982#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
983 proc_net_remove(net, "ip6t_hashlimit");
984#endif
985}
986
987static int __net_init hashlimit_net_init(struct net *net)
988{
989 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
990
991 INIT_HLIST_HEAD(&hashlimit_net->htables);
992 return hashlimit_proc_net_init(net);
993}
994
995static void __net_exit hashlimit_net_exit(struct net *net)
996{
997 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
998
999 BUG_ON(!hlist_empty(&hashlimit_net->htables));
1000 hashlimit_proc_net_exit(net);
1001}
1002
1003static struct pernet_operations hashlimit_net_ops = {
1004 .init = hashlimit_net_init,
1005 .exit = hashlimit_net_exit,
1006 .id = &hashlimit_net_id,
1007 .size = sizeof(struct hashlimit_net),
1008};
1009
960static int __init hashlimit_mt_init(void) 1010static int __init hashlimit_mt_init(void)
961{ 1011{
962 int err; 1012 int err;
963 1013
1014 err = register_pernet_subsys(&hashlimit_net_ops);
1015 if (err < 0)
1016 return err;
964 err = xt_register_matches(hashlimit_mt_reg, 1017 err = xt_register_matches(hashlimit_mt_reg,
965 ARRAY_SIZE(hashlimit_mt_reg)); 1018 ARRAY_SIZE(hashlimit_mt_reg));
966 if (err < 0) 1019 if (err < 0)
@@ -974,41 +1027,21 @@ static int __init hashlimit_mt_init(void)
974 printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n"); 1027 printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n");
975 goto err2; 1028 goto err2;
976 } 1029 }
977 hashlimit_procdir4 = proc_mkdir("ipt_hashlimit", init_net.proc_net); 1030 return 0;
978 if (!hashlimit_procdir4) { 1031
979 printk(KERN_ERR "xt_hashlimit: unable to create proc dir "
980 "entry\n");
981 goto err3;
982 }
983 err = 0;
984#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
985 hashlimit_procdir6 = proc_mkdir("ip6t_hashlimit", init_net.proc_net);
986 if (!hashlimit_procdir6) {
987 printk(KERN_ERR "xt_hashlimit: unable to create proc dir "
988 "entry\n");
989 err = -ENOMEM;
990 }
991#endif
992 if (!err)
993 return 0;
994 remove_proc_entry("ipt_hashlimit", init_net.proc_net);
995err3:
996 kmem_cache_destroy(hashlimit_cachep);
997err2: 1032err2:
998 xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg)); 1033 xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
999err1: 1034err1:
1035 unregister_pernet_subsys(&hashlimit_net_ops);
1000 return err; 1036 return err;
1001 1037
1002} 1038}
1003 1039
1004static void __exit hashlimit_mt_exit(void) 1040static void __exit hashlimit_mt_exit(void)
1005{ 1041{
1006 remove_proc_entry("ipt_hashlimit", init_net.proc_net);
1007#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
1008 remove_proc_entry("ip6t_hashlimit", init_net.proc_net);
1009#endif
1010 kmem_cache_destroy(hashlimit_cachep); 1042 kmem_cache_destroy(hashlimit_cachep);
1011 xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg)); 1043 xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
1044 unregister_pernet_subsys(&hashlimit_net_ops);
1012} 1045}
1013 1046
1014module_init(hashlimit_mt_init); 1047module_init(hashlimit_mt_init);
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index 2773be6a71dd..a0ca5339af41 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -148,7 +148,7 @@ struct compat_xt_rateinfo {
148 148
149/* To keep the full "prev" timestamp, the upper 32 bits are stored in the 149/* To keep the full "prev" timestamp, the upper 32 bits are stored in the
150 * master pointer, which does not need to be preserved. */ 150 * master pointer, which does not need to be preserved. */
151static void limit_mt_compat_from_user(void *dst, void *src) 151static void limit_mt_compat_from_user(void *dst, const void *src)
152{ 152{
153 const struct compat_xt_rateinfo *cm = src; 153 const struct compat_xt_rateinfo *cm = src;
154 struct xt_rateinfo m = { 154 struct xt_rateinfo m = {
@@ -162,7 +162,7 @@ static void limit_mt_compat_from_user(void *dst, void *src)
162 memcpy(dst, &m, sizeof(m)); 162 memcpy(dst, &m, sizeof(m));
163} 163}
164 164
165static int limit_mt_compat_to_user(void __user *dst, void *src) 165static int limit_mt_compat_to_user(void __user *dst, const void *src)
166{ 166{
167 const struct xt_rateinfo *m = src; 167 const struct xt_rateinfo *m = src;
168 struct compat_xt_rateinfo cm = { 168 struct compat_xt_rateinfo cm = {
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index 4d1a41bbd5d7..4169e200588d 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -334,7 +334,7 @@ static bool xt_osf_match_packet(const struct sk_buff *skb,
334 if (info->flags & XT_OSF_LOG) 334 if (info->flags & XT_OSF_LOG)
335 nf_log_packet(p->family, p->hooknum, skb, 335 nf_log_packet(p->family, p->hooknum, skb,
336 p->in, p->out, NULL, 336 p->in, p->out, NULL,
337 "%s [%s:%s] : %pi4:%d -> %pi4:%d hops=%d\n", 337 "%s [%s:%s] : %pI4:%d -> %pI4:%d hops=%d\n",
338 f->genre, f->version, f->subtype, 338 f->genre, f->version, f->subtype,
339 &ip->saddr, ntohs(tcp->source), 339 &ip->saddr, ntohs(tcp->source),
340 &ip->daddr, ntohs(tcp->dest), 340 &ip->daddr, ntohs(tcp->dest),
@@ -349,7 +349,7 @@ static bool xt_osf_match_packet(const struct sk_buff *skb,
349 349
350 if (!fcount && (info->flags & XT_OSF_LOG)) 350 if (!fcount && (info->flags & XT_OSF_LOG))
351 nf_log_packet(p->family, p->hooknum, skb, p->in, p->out, NULL, 351 nf_log_packet(p->family, p->hooknum, skb, p->in, p->out, NULL,
352 "Remote OS is not known: %pi4:%u -> %pi4:%u\n", 352 "Remote OS is not known: %pI4:%u -> %pI4:%u\n",
353 &ip->saddr, ntohs(tcp->source), 353 &ip->saddr, ntohs(tcp->source),
354 &ip->daddr, ntohs(tcp->dest)); 354 &ip->daddr, ntohs(tcp->dest));
355 355
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index fc70a49c0afd..971d172afece 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -28,6 +28,7 @@
28#include <linux/skbuff.h> 28#include <linux/skbuff.h>
29#include <linux/inet.h> 29#include <linux/inet.h>
30#include <net/net_namespace.h> 30#include <net/net_namespace.h>
31#include <net/netns/generic.h>
31 32
32#include <linux/netfilter/x_tables.h> 33#include <linux/netfilter/x_tables.h>
33#include <linux/netfilter/xt_recent.h> 34#include <linux/netfilter/xt_recent.h>
@@ -52,7 +53,7 @@ module_param(ip_list_perms, uint, 0400);
52module_param(ip_list_uid, uint, 0400); 53module_param(ip_list_uid, uint, 0400);
53module_param(ip_list_gid, uint, 0400); 54module_param(ip_list_gid, uint, 0400);
54MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list"); 55MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list");
55MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP to remember (max. 255)"); 56MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)");
56MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs"); 57MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs");
57MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files"); 58MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files");
58MODULE_PARM_DESC(ip_list_uid,"owner of /proc/net/xt_recent/* files"); 59MODULE_PARM_DESC(ip_list_uid,"owner of /proc/net/xt_recent/* files");
@@ -78,37 +79,40 @@ struct recent_table {
78 struct list_head iphash[0]; 79 struct list_head iphash[0];
79}; 80};
80 81
81static LIST_HEAD(tables); 82struct recent_net {
83 struct list_head tables;
84#ifdef CONFIG_PROC_FS
85 struct proc_dir_entry *xt_recent;
86#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
87 struct proc_dir_entry *ipt_recent;
88#endif
89#endif
90};
91
92static int recent_net_id;
93static inline struct recent_net *recent_pernet(struct net *net)
94{
95 return net_generic(net, recent_net_id);
96}
97
82static DEFINE_SPINLOCK(recent_lock); 98static DEFINE_SPINLOCK(recent_lock);
83static DEFINE_MUTEX(recent_mutex); 99static DEFINE_MUTEX(recent_mutex);
84 100
85#ifdef CONFIG_PROC_FS 101#ifdef CONFIG_PROC_FS
86#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
87static struct proc_dir_entry *proc_old_dir;
88#endif
89static struct proc_dir_entry *recent_proc_dir;
90static const struct file_operations recent_old_fops, recent_mt_fops; 102static const struct file_operations recent_old_fops, recent_mt_fops;
91#endif 103#endif
92 104
93static u_int32_t hash_rnd; 105static u_int32_t hash_rnd __read_mostly;
94static bool hash_rnd_initted; 106static bool hash_rnd_inited __read_mostly;
95 107
96static unsigned int recent_entry_hash4(const union nf_inet_addr *addr) 108static inline unsigned int recent_entry_hash4(const union nf_inet_addr *addr)
97{ 109{
98 if (!hash_rnd_initted) {
99 get_random_bytes(&hash_rnd, sizeof(hash_rnd));
100 hash_rnd_initted = true;
101 }
102 return jhash_1word((__force u32)addr->ip, hash_rnd) & 110 return jhash_1word((__force u32)addr->ip, hash_rnd) &
103 (ip_list_hash_size - 1); 111 (ip_list_hash_size - 1);
104} 112}
105 113
106static unsigned int recent_entry_hash6(const union nf_inet_addr *addr) 114static inline unsigned int recent_entry_hash6(const union nf_inet_addr *addr)
107{ 115{
108 if (!hash_rnd_initted) {
109 get_random_bytes(&hash_rnd, sizeof(hash_rnd));
110 hash_rnd_initted = true;
111 }
112 return jhash2((u32 *)addr->ip6, ARRAY_SIZE(addr->ip6), hash_rnd) & 116 return jhash2((u32 *)addr->ip6, ARRAY_SIZE(addr->ip6), hash_rnd) &
113 (ip_list_hash_size - 1); 117 (ip_list_hash_size - 1);
114} 118}
@@ -173,18 +177,19 @@ recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr,
173 177
174static void recent_entry_update(struct recent_table *t, struct recent_entry *e) 178static void recent_entry_update(struct recent_table *t, struct recent_entry *e)
175{ 179{
180 e->index %= ip_pkt_list_tot;
176 e->stamps[e->index++] = jiffies; 181 e->stamps[e->index++] = jiffies;
177 if (e->index > e->nstamps) 182 if (e->index > e->nstamps)
178 e->nstamps = e->index; 183 e->nstamps = e->index;
179 e->index %= ip_pkt_list_tot;
180 list_move_tail(&e->lru_list, &t->lru_list); 184 list_move_tail(&e->lru_list, &t->lru_list);
181} 185}
182 186
183static struct recent_table *recent_table_lookup(const char *name) 187static struct recent_table *recent_table_lookup(struct recent_net *recent_net,
188 const char *name)
184{ 189{
185 struct recent_table *t; 190 struct recent_table *t;
186 191
187 list_for_each_entry(t, &tables, list) 192 list_for_each_entry(t, &recent_net->tables, list)
188 if (!strcmp(t->name, name)) 193 if (!strcmp(t->name, name))
189 return t; 194 return t;
190 return NULL; 195 return NULL;
@@ -203,6 +208,8 @@ static void recent_table_flush(struct recent_table *t)
203static bool 208static bool
204recent_mt(const struct sk_buff *skb, const struct xt_match_param *par) 209recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
205{ 210{
211 struct net *net = dev_net(par->in ? par->in : par->out);
212 struct recent_net *recent_net = recent_pernet(net);
206 const struct xt_recent_mtinfo *info = par->matchinfo; 213 const struct xt_recent_mtinfo *info = par->matchinfo;
207 struct recent_table *t; 214 struct recent_table *t;
208 struct recent_entry *e; 215 struct recent_entry *e;
@@ -235,7 +242,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
235 ttl++; 242 ttl++;
236 243
237 spin_lock_bh(&recent_lock); 244 spin_lock_bh(&recent_lock);
238 t = recent_table_lookup(info->name); 245 t = recent_table_lookup(recent_net, info->name);
239 e = recent_entry_lookup(t, &addr, par->match->family, 246 e = recent_entry_lookup(t, &addr, par->match->family,
240 (info->check_set & XT_RECENT_TTL) ? ttl : 0); 247 (info->check_set & XT_RECENT_TTL) ? ttl : 0);
241 if (e == NULL) { 248 if (e == NULL) {
@@ -260,7 +267,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
260 for (i = 0; i < e->nstamps; i++) { 267 for (i = 0; i < e->nstamps; i++) {
261 if (info->seconds && time_after(time, e->stamps[i])) 268 if (info->seconds && time_after(time, e->stamps[i]))
262 continue; 269 continue;
263 if (++hits >= info->hit_count) { 270 if (!info->hit_count || ++hits >= info->hit_count) {
264 ret = !ret; 271 ret = !ret;
265 break; 272 break;
266 } 273 }
@@ -279,6 +286,7 @@ out:
279 286
280static bool recent_mt_check(const struct xt_mtchk_param *par) 287static bool recent_mt_check(const struct xt_mtchk_param *par)
281{ 288{
289 struct recent_net *recent_net = recent_pernet(par->net);
282 const struct xt_recent_mtinfo *info = par->matchinfo; 290 const struct xt_recent_mtinfo *info = par->matchinfo;
283 struct recent_table *t; 291 struct recent_table *t;
284#ifdef CONFIG_PROC_FS 292#ifdef CONFIG_PROC_FS
@@ -287,6 +295,10 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
287 unsigned i; 295 unsigned i;
288 bool ret = false; 296 bool ret = false;
289 297
298 if (unlikely(!hash_rnd_inited)) {
299 get_random_bytes(&hash_rnd, sizeof(hash_rnd));
300 hash_rnd_inited = true;
301 }
290 if (hweight8(info->check_set & 302 if (hweight8(info->check_set &
291 (XT_RECENT_SET | XT_RECENT_REMOVE | 303 (XT_RECENT_SET | XT_RECENT_REMOVE |
292 XT_RECENT_CHECK | XT_RECENT_UPDATE)) != 1) 304 XT_RECENT_CHECK | XT_RECENT_UPDATE)) != 1)
@@ -294,14 +306,18 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
294 if ((info->check_set & (XT_RECENT_SET | XT_RECENT_REMOVE)) && 306 if ((info->check_set & (XT_RECENT_SET | XT_RECENT_REMOVE)) &&
295 (info->seconds || info->hit_count)) 307 (info->seconds || info->hit_count))
296 return false; 308 return false;
297 if (info->hit_count > ip_pkt_list_tot) 309 if (info->hit_count > ip_pkt_list_tot) {
310 pr_info(KBUILD_MODNAME ": hitcount (%u) is larger than "
311 "packets to be remembered (%u)\n",
312 info->hit_count, ip_pkt_list_tot);
298 return false; 313 return false;
314 }
299 if (info->name[0] == '\0' || 315 if (info->name[0] == '\0' ||
300 strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN) 316 strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN)
301 return false; 317 return false;
302 318
303 mutex_lock(&recent_mutex); 319 mutex_lock(&recent_mutex);
304 t = recent_table_lookup(info->name); 320 t = recent_table_lookup(recent_net, info->name);
305 if (t != NULL) { 321 if (t != NULL) {
306 t->refcnt++; 322 t->refcnt++;
307 ret = true; 323 ret = true;
@@ -318,7 +334,7 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
318 for (i = 0; i < ip_list_hash_size; i++) 334 for (i = 0; i < ip_list_hash_size; i++)
319 INIT_LIST_HEAD(&t->iphash[i]); 335 INIT_LIST_HEAD(&t->iphash[i]);
320#ifdef CONFIG_PROC_FS 336#ifdef CONFIG_PROC_FS
321 pde = proc_create_data(t->name, ip_list_perms, recent_proc_dir, 337 pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent,
322 &recent_mt_fops, t); 338 &recent_mt_fops, t);
323 if (pde == NULL) { 339 if (pde == NULL) {
324 kfree(t); 340 kfree(t);
@@ -327,10 +343,10 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
327 pde->uid = ip_list_uid; 343 pde->uid = ip_list_uid;
328 pde->gid = ip_list_gid; 344 pde->gid = ip_list_gid;
329#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT 345#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
330 pde = proc_create_data(t->name, ip_list_perms, proc_old_dir, 346 pde = proc_create_data(t->name, ip_list_perms, recent_net->ipt_recent,
331 &recent_old_fops, t); 347 &recent_old_fops, t);
332 if (pde == NULL) { 348 if (pde == NULL) {
333 remove_proc_entry(t->name, proc_old_dir); 349 remove_proc_entry(t->name, recent_net->xt_recent);
334 kfree(t); 350 kfree(t);
335 goto out; 351 goto out;
336 } 352 }
@@ -339,7 +355,7 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
339#endif 355#endif
340#endif 356#endif
341 spin_lock_bh(&recent_lock); 357 spin_lock_bh(&recent_lock);
342 list_add_tail(&t->list, &tables); 358 list_add_tail(&t->list, &recent_net->tables);
343 spin_unlock_bh(&recent_lock); 359 spin_unlock_bh(&recent_lock);
344 ret = true; 360 ret = true;
345out: 361out:
@@ -349,20 +365,21 @@ out:
349 365
350static void recent_mt_destroy(const struct xt_mtdtor_param *par) 366static void recent_mt_destroy(const struct xt_mtdtor_param *par)
351{ 367{
368 struct recent_net *recent_net = recent_pernet(par->net);
352 const struct xt_recent_mtinfo *info = par->matchinfo; 369 const struct xt_recent_mtinfo *info = par->matchinfo;
353 struct recent_table *t; 370 struct recent_table *t;
354 371
355 mutex_lock(&recent_mutex); 372 mutex_lock(&recent_mutex);
356 t = recent_table_lookup(info->name); 373 t = recent_table_lookup(recent_net, info->name);
357 if (--t->refcnt == 0) { 374 if (--t->refcnt == 0) {
358 spin_lock_bh(&recent_lock); 375 spin_lock_bh(&recent_lock);
359 list_del(&t->list); 376 list_del(&t->list);
360 spin_unlock_bh(&recent_lock); 377 spin_unlock_bh(&recent_lock);
361#ifdef CONFIG_PROC_FS 378#ifdef CONFIG_PROC_FS
362#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT 379#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
363 remove_proc_entry(t->name, proc_old_dir); 380 remove_proc_entry(t->name, recent_net->ipt_recent);
364#endif 381#endif
365 remove_proc_entry(t->name, recent_proc_dir); 382 remove_proc_entry(t->name, recent_net->xt_recent);
366#endif 383#endif
367 recent_table_flush(t); 384 recent_table_flush(t);
368 kfree(t); 385 kfree(t);
@@ -611,8 +628,65 @@ static const struct file_operations recent_mt_fops = {
611 .release = seq_release_private, 628 .release = seq_release_private,
612 .owner = THIS_MODULE, 629 .owner = THIS_MODULE,
613}; 630};
631
632static int __net_init recent_proc_net_init(struct net *net)
633{
634 struct recent_net *recent_net = recent_pernet(net);
635
636 recent_net->xt_recent = proc_mkdir("xt_recent", net->proc_net);
637 if (!recent_net->xt_recent)
638 return -ENOMEM;
639#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
640 recent_net->ipt_recent = proc_mkdir("ipt_recent", net->proc_net);
641 if (!recent_net->ipt_recent) {
642 proc_net_remove(net, "xt_recent");
643 return -ENOMEM;
644 }
645#endif
646 return 0;
647}
648
649static void __net_exit recent_proc_net_exit(struct net *net)
650{
651#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
652 proc_net_remove(net, "ipt_recent");
653#endif
654 proc_net_remove(net, "xt_recent");
655}
656#else
657static inline int recent_proc_net_init(struct net *net)
658{
659 return 0;
660}
661
662static inline void recent_proc_net_exit(struct net *net)
663{
664}
614#endif /* CONFIG_PROC_FS */ 665#endif /* CONFIG_PROC_FS */
615 666
667static int __net_init recent_net_init(struct net *net)
668{
669 struct recent_net *recent_net = recent_pernet(net);
670
671 INIT_LIST_HEAD(&recent_net->tables);
672 return recent_proc_net_init(net);
673}
674
675static void __net_exit recent_net_exit(struct net *net)
676{
677 struct recent_net *recent_net = recent_pernet(net);
678
679 BUG_ON(!list_empty(&recent_net->tables));
680 recent_proc_net_exit(net);
681}
682
683static struct pernet_operations recent_net_ops = {
684 .init = recent_net_init,
685 .exit = recent_net_exit,
686 .id = &recent_net_id,
687 .size = sizeof(struct recent_net),
688};
689
616static struct xt_match recent_mt_reg[] __read_mostly = { 690static struct xt_match recent_mt_reg[] __read_mostly = {
617 { 691 {
618 .name = "recent", 692 .name = "recent",
@@ -644,39 +718,19 @@ static int __init recent_mt_init(void)
644 return -EINVAL; 718 return -EINVAL;
645 ip_list_hash_size = 1 << fls(ip_list_tot); 719 ip_list_hash_size = 1 << fls(ip_list_tot);
646 720
647 err = xt_register_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg)); 721 err = register_pernet_subsys(&recent_net_ops);
648#ifdef CONFIG_PROC_FS
649 if (err) 722 if (err)
650 return err; 723 return err;
651 recent_proc_dir = proc_mkdir("xt_recent", init_net.proc_net); 724 err = xt_register_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg));
652 if (recent_proc_dir == NULL) { 725 if (err)
653 xt_unregister_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg)); 726 unregister_pernet_subsys(&recent_net_ops);
654 err = -ENOMEM;
655 }
656#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
657 if (err < 0)
658 return err;
659 proc_old_dir = proc_mkdir("ipt_recent", init_net.proc_net);
660 if (proc_old_dir == NULL) {
661 remove_proc_entry("xt_recent", init_net.proc_net);
662 xt_unregister_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg));
663 err = -ENOMEM;
664 }
665#endif
666#endif
667 return err; 727 return err;
668} 728}
669 729
670static void __exit recent_mt_exit(void) 730static void __exit recent_mt_exit(void)
671{ 731{
672 BUG_ON(!list_empty(&tables));
673 xt_unregister_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg)); 732 xt_unregister_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg));
674#ifdef CONFIG_PROC_FS 733 unregister_pernet_subsys(&recent_net_ops);
675#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
676 remove_proc_entry("ipt_recent", init_net.proc_net);
677#endif
678 remove_proc_entry("xt_recent", init_net.proc_net);
679#endif
680} 734}
681 735
682module_init(recent_mt_init); 736module_init(recent_mt_init);
diff --git a/net/netfilter/xt_repldata.h b/net/netfilter/xt_repldata.h
new file mode 100644
index 000000000000..6efe4e5a81c6
--- /dev/null
+++ b/net/netfilter/xt_repldata.h
@@ -0,0 +1,35 @@
1/*
2 * Today's hack: quantum tunneling in structs
3 *
4 * 'entries' and 'term' are never anywhere referenced by word in code. In fact,
5 * they serve as the hanging-off data accessed through repl.data[].
6 */
7
8#define xt_alloc_initial_table(type, typ2) ({ \
9 unsigned int hook_mask = info->valid_hooks; \
10 unsigned int nhooks = hweight32(hook_mask); \
11 unsigned int bytes = 0, hooknum = 0, i = 0; \
12 struct { \
13 struct type##_replace repl; \
14 struct type##_standard entries[nhooks]; \
15 struct type##_error term; \
16 } *tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); \
17 if (tbl == NULL) \
18 return NULL; \
19 strncpy(tbl->repl.name, info->name, sizeof(tbl->repl.name)); \
20 tbl->term = (struct type##_error)typ2##_ERROR_INIT; \
21 tbl->repl.valid_hooks = hook_mask; \
22 tbl->repl.num_entries = nhooks + 1; \
23 tbl->repl.size = nhooks * sizeof(struct type##_standard) + \
24 sizeof(struct type##_error); \
25 for (; hook_mask != 0; hook_mask >>= 1, ++hooknum) { \
26 if (!(hook_mask & 1)) \
27 continue; \
28 tbl->repl.hook_entry[hooknum] = bytes; \
29 tbl->repl.underflow[hooknum] = bytes; \
30 tbl->entries[i++] = (struct type##_standard) \
31 typ2##_STANDARD_INIT(NF_ACCEPT); \
32 bytes += sizeof(struct type##_standard); \
33 } \
34 tbl; \
35})
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index c5d9f97ef217..0bfeaab88ef5 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -315,7 +315,6 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
315 entry_old = netlbl_domhsh_search_def(entry->domain); 315 entry_old = netlbl_domhsh_search_def(entry->domain);
316 if (entry_old == NULL) { 316 if (entry_old == NULL) {
317 entry->valid = 1; 317 entry->valid = 1;
318 INIT_RCU_HEAD(&entry->rcu);
319 318
320 if (entry->domain != NULL) { 319 if (entry->domain != NULL) {
321 u32 bkt = netlbl_domhsh_hash(entry->domain); 320 u32 bkt = netlbl_domhsh_hash(entry->domain);
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 98ed22ee2ff4..852d9d7976b9 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -327,7 +327,6 @@ static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface,
327 entry->list.addr = addr->s_addr & mask->s_addr; 327 entry->list.addr = addr->s_addr & mask->s_addr;
328 entry->list.mask = mask->s_addr; 328 entry->list.mask = mask->s_addr;
329 entry->list.valid = 1; 329 entry->list.valid = 1;
330 INIT_RCU_HEAD(&entry->rcu);
331 entry->secid = secid; 330 entry->secid = secid;
332 331
333 spin_lock(&netlbl_unlhsh_lock); 332 spin_lock(&netlbl_unlhsh_lock);
@@ -373,7 +372,6 @@ static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface,
373 entry->list.addr.s6_addr32[3] &= mask->s6_addr32[3]; 372 entry->list.addr.s6_addr32[3] &= mask->s6_addr32[3];
374 ipv6_addr_copy(&entry->list.mask, mask); 373 ipv6_addr_copy(&entry->list.mask, mask);
375 entry->list.valid = 1; 374 entry->list.valid = 1;
376 INIT_RCU_HEAD(&entry->rcu);
377 entry->secid = secid; 375 entry->secid = secid;
378 376
379 spin_lock(&netlbl_unlhsh_lock); 377 spin_lock(&netlbl_unlhsh_lock);
@@ -410,7 +408,6 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex)
410 INIT_LIST_HEAD(&iface->addr4_list); 408 INIT_LIST_HEAD(&iface->addr4_list);
411 INIT_LIST_HEAD(&iface->addr6_list); 409 INIT_LIST_HEAD(&iface->addr6_list);
412 iface->valid = 1; 410 iface->valid = 1;
413 INIT_RCU_HEAD(&iface->rcu);
414 411
415 spin_lock(&netlbl_unlhsh_lock); 412 spin_lock(&netlbl_unlhsh_lock);
416 if (ifindex > 0) { 413 if (ifindex > 0) {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a4957bf2ca60..acbbae1e89b5 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -455,9 +455,14 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
455 if (nl_table[protocol].registered && 455 if (nl_table[protocol].registered &&
456 try_module_get(nl_table[protocol].module)) 456 try_module_get(nl_table[protocol].module))
457 module = nl_table[protocol].module; 457 module = nl_table[protocol].module;
458 else
459 err = -EPROTONOSUPPORT;
458 cb_mutex = nl_table[protocol].cb_mutex; 460 cb_mutex = nl_table[protocol].cb_mutex;
459 netlink_unlock_table(); 461 netlink_unlock_table();
460 462
463 if (err < 0)
464 goto out;
465
461 err = __netlink_create(net, sock, cb_mutex, protocol); 466 err = __netlink_create(net, sock, cb_mutex, protocol);
462 if (err < 0) 467 if (err < 0)
463 goto out_module; 468 goto out_module;
@@ -1088,6 +1093,7 @@ static inline int do_one_set_err(struct sock *sk,
1088 struct netlink_set_err_data *p) 1093 struct netlink_set_err_data *p)
1089{ 1094{
1090 struct netlink_sock *nlk = nlk_sk(sk); 1095 struct netlink_sock *nlk = nlk_sk(sk);
1096 int ret = 0;
1091 1097
1092 if (sk == p->exclude_sk) 1098 if (sk == p->exclude_sk)
1093 goto out; 1099 goto out;
@@ -1099,10 +1105,15 @@ static inline int do_one_set_err(struct sock *sk,
1099 !test_bit(p->group - 1, nlk->groups)) 1105 !test_bit(p->group - 1, nlk->groups))
1100 goto out; 1106 goto out;
1101 1107
1108 if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
1109 ret = 1;
1110 goto out;
1111 }
1112
1102 sk->sk_err = p->code; 1113 sk->sk_err = p->code;
1103 sk->sk_error_report(sk); 1114 sk->sk_error_report(sk);
1104out: 1115out:
1105 return 0; 1116 return ret;
1106} 1117}
1107 1118
1108/** 1119/**
@@ -1111,12 +1122,16 @@ out:
1111 * @pid: the PID of a process that we want to skip (if any) 1122 * @pid: the PID of a process that we want to skip (if any)
1112 * @groups: the broadcast group that will notice the error 1123 * @groups: the broadcast group that will notice the error
1113 * @code: error code, must be negative (as usual in kernelspace) 1124 * @code: error code, must be negative (as usual in kernelspace)
1125 *
1126 * This function returns the number of broadcast listeners that have set the
1127 * NETLINK_RECV_NO_ENOBUFS socket option.
1114 */ 1128 */
1115void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code) 1129int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
1116{ 1130{
1117 struct netlink_set_err_data info; 1131 struct netlink_set_err_data info;
1118 struct hlist_node *node; 1132 struct hlist_node *node;
1119 struct sock *sk; 1133 struct sock *sk;
1134 int ret = 0;
1120 1135
1121 info.exclude_sk = ssk; 1136 info.exclude_sk = ssk;
1122 info.pid = pid; 1137 info.pid = pid;
@@ -1127,9 +1142,10 @@ void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
1127 read_lock(&nl_table_lock); 1142 read_lock(&nl_table_lock);
1128 1143
1129 sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) 1144 sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
1130 do_one_set_err(sk, &info); 1145 ret += do_one_set_err(sk, &info);
1131 1146
1132 read_unlock(&nl_table_lock); 1147 read_unlock(&nl_table_lock);
1148 return ret;
1133} 1149}
1134EXPORT_SYMBOL(netlink_set_err); 1150EXPORT_SYMBOL(netlink_set_err);
1135 1151
@@ -1973,12 +1989,12 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
1973 if (v == SEQ_START_TOKEN) 1989 if (v == SEQ_START_TOKEN)
1974 seq_puts(seq, 1990 seq_puts(seq,
1975 "sk Eth Pid Groups " 1991 "sk Eth Pid Groups "
1976 "Rmem Wmem Dump Locks Drops\n"); 1992 "Rmem Wmem Dump Locks Drops Inode\n");
1977 else { 1993 else {
1978 struct sock *s = v; 1994 struct sock *s = v;
1979 struct netlink_sock *nlk = nlk_sk(s); 1995 struct netlink_sock *nlk = nlk_sk(s);
1980 1996
1981 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n", 1997 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d %-8lu\n",
1982 s, 1998 s,
1983 s->sk_protocol, 1999 s->sk_protocol,
1984 nlk->pid, 2000 nlk->pid,
@@ -1987,7 +2003,8 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
1987 sk_wmem_alloc_get(s), 2003 sk_wmem_alloc_get(s),
1988 nlk->cb, 2004 nlk->cb,
1989 atomic_read(&s->sk_refcnt), 2005 atomic_read(&s->sk_refcnt),
1990 atomic_read(&s->sk_drops) 2006 atomic_read(&s->sk_drops),
2007 sock_i_ino(s)
1991 ); 2008 );
1992 2009
1993 } 2010 }
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index d07ecda0a92d..a4b6e148c5de 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -681,9 +681,7 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
681 int chains_to_skip = cb->args[0]; 681 int chains_to_skip = cb->args[0];
682 int fams_to_skip = cb->args[1]; 682 int fams_to_skip = cb->args[1];
683 683
684 for (i = 0; i < GENL_FAM_TAB_SIZE; i++) { 684 for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
685 if (i < chains_to_skip)
686 continue;
687 n = 0; 685 n = 0;
688 list_for_each_entry(rt, genl_family_chain(i), family_list) { 686 list_for_each_entry(rt, genl_family_chain(i), family_list) {
689 if (!rt->netnsok && !net_eq(net, &init_net)) 687 if (!rt->netnsok && !net_eq(net, &init_net))
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 71604c6613b5..a249127020a5 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1267,28 +1267,13 @@ static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1267 1267
1268static void *nr_info_start(struct seq_file *seq, loff_t *pos) 1268static void *nr_info_start(struct seq_file *seq, loff_t *pos)
1269{ 1269{
1270 struct sock *s;
1271 struct hlist_node *node;
1272 int i = 1;
1273
1274 spin_lock_bh(&nr_list_lock); 1270 spin_lock_bh(&nr_list_lock);
1275 if (*pos == 0) 1271 return seq_hlist_start_head(&nr_list, *pos);
1276 return SEQ_START_TOKEN;
1277
1278 sk_for_each(s, node, &nr_list) {
1279 if (i == *pos)
1280 return s;
1281 ++i;
1282 }
1283 return NULL;
1284} 1272}
1285 1273
1286static void *nr_info_next(struct seq_file *seq, void *v, loff_t *pos) 1274static void *nr_info_next(struct seq_file *seq, void *v, loff_t *pos)
1287{ 1275{
1288 ++*pos; 1276 return seq_hlist_next(v, &nr_list, pos);
1289
1290 return (v == SEQ_START_TOKEN) ? sk_head(&nr_list)
1291 : sk_next((struct sock *)v);
1292} 1277}
1293 1278
1294static void nr_info_stop(struct seq_file *seq, void *v) 1279static void nr_info_stop(struct seq_file *seq, void *v)
@@ -1298,7 +1283,7 @@ static void nr_info_stop(struct seq_file *seq, void *v)
1298 1283
1299static int nr_info_show(struct seq_file *seq, void *v) 1284static int nr_info_show(struct seq_file *seq, void *v)
1300{ 1285{
1301 struct sock *s = v; 1286 struct sock *s = sk_entry(v);
1302 struct net_device *dev; 1287 struct net_device *dev;
1303 struct nr_sock *nr; 1288 struct nr_sock *nr;
1304 const char *devname; 1289 const char *devname;
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index aacba76070fc..5cc648012f50 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -843,12 +843,13 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
843 dptr = skb_push(skb, 1); 843 dptr = skb_push(skb, 1);
844 *dptr = AX25_P_NETROM; 844 *dptr = AX25_P_NETROM;
845 845
846 ax25s = ax25_send_frame(skb, 256, (ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev); 846 ax25s = nr_neigh->ax25;
847 if (nr_neigh->ax25 && ax25s) { 847 nr_neigh->ax25 = ax25_send_frame(skb, 256,
848 /* We were already holding this ax25_cb */ 848 (ax25_address *)dev->dev_addr,
849 &nr_neigh->callsign,
850 nr_neigh->digipeat, nr_neigh->dev);
851 if (ax25s)
849 ax25_cb_put(ax25s); 852 ax25_cb_put(ax25s);
850 }
851 nr_neigh->ax25 = ax25s;
852 853
853 dev_put(dev); 854 dev_put(dev);
854 ret = (nr_neigh->ax25 != NULL); 855 ret = (nr_neigh->ax25 != NULL);
@@ -862,33 +863,13 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
862 863
863static void *nr_node_start(struct seq_file *seq, loff_t *pos) 864static void *nr_node_start(struct seq_file *seq, loff_t *pos)
864{ 865{
865 struct nr_node *nr_node;
866 struct hlist_node *node;
867 int i = 1;
868
869 spin_lock_bh(&nr_node_list_lock); 866 spin_lock_bh(&nr_node_list_lock);
870 if (*pos == 0) 867 return seq_hlist_start_head(&nr_node_list, *pos);
871 return SEQ_START_TOKEN;
872
873 nr_node_for_each(nr_node, node, &nr_node_list) {
874 if (i == *pos)
875 return nr_node;
876 ++i;
877 }
878
879 return NULL;
880} 868}
881 869
882static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos) 870static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
883{ 871{
884 struct hlist_node *node; 872 return seq_hlist_next(v, &nr_node_list, pos);
885 ++*pos;
886
887 node = (v == SEQ_START_TOKEN)
888 ? nr_node_list.first
889 : ((struct nr_node *)v)->node_node.next;
890
891 return hlist_entry(node, struct nr_node, node_node);
892} 873}
893 874
894static void nr_node_stop(struct seq_file *seq, void *v) 875static void nr_node_stop(struct seq_file *seq, void *v)
@@ -905,7 +886,9 @@ static int nr_node_show(struct seq_file *seq, void *v)
905 seq_puts(seq, 886 seq_puts(seq,
906 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n"); 887 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
907 else { 888 else {
908 struct nr_node *nr_node = v; 889 struct nr_node *nr_node = hlist_entry(v, struct nr_node,
890 node_node);
891
909 nr_node_lock(nr_node); 892 nr_node_lock(nr_node);
910 seq_printf(seq, "%-9s %-7s %d %d", 893 seq_printf(seq, "%-9s %-7s %d %d",
911 ax2asc(buf, &nr_node->callsign), 894 ax2asc(buf, &nr_node->callsign),
@@ -948,31 +931,13 @@ const struct file_operations nr_nodes_fops = {
948 931
949static void *nr_neigh_start(struct seq_file *seq, loff_t *pos) 932static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
950{ 933{
951 struct nr_neigh *nr_neigh;
952 struct hlist_node *node;
953 int i = 1;
954
955 spin_lock_bh(&nr_neigh_list_lock); 934 spin_lock_bh(&nr_neigh_list_lock);
956 if (*pos == 0) 935 return seq_hlist_start_head(&nr_neigh_list, *pos);
957 return SEQ_START_TOKEN;
958
959 nr_neigh_for_each(nr_neigh, node, &nr_neigh_list) {
960 if (i == *pos)
961 return nr_neigh;
962 }
963 return NULL;
964} 936}
965 937
966static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos) 938static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
967{ 939{
968 struct hlist_node *node; 940 return seq_hlist_next(v, &nr_neigh_list, pos);
969 ++*pos;
970
971 node = (v == SEQ_START_TOKEN)
972 ? nr_neigh_list.first
973 : ((struct nr_neigh *)v)->neigh_node.next;
974
975 return hlist_entry(node, struct nr_neigh, neigh_node);
976} 941}
977 942
978static void nr_neigh_stop(struct seq_file *seq, void *v) 943static void nr_neigh_stop(struct seq_file *seq, void *v)
@@ -988,8 +953,9 @@ static int nr_neigh_show(struct seq_file *seq, void *v)
988 if (v == SEQ_START_TOKEN) 953 if (v == SEQ_START_TOKEN)
989 seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n"); 954 seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n");
990 else { 955 else {
991 struct nr_neigh *nr_neigh = v; 956 struct nr_neigh *nr_neigh;
992 957
958 nr_neigh = hlist_entry(v, struct nr_neigh, neigh_node);
993 seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d", 959 seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d",
994 nr_neigh->number, 960 nr_neigh->number,
995 ax2asc(buf, &nr_neigh->callsign), 961 ax2asc(buf, &nr_neigh->callsign),
diff --git a/net/packet/Kconfig b/net/packet/Kconfig
index 34ff93ff894d..0060e3b396b7 100644
--- a/net/packet/Kconfig
+++ b/net/packet/Kconfig
@@ -14,13 +14,3 @@ config PACKET
14 be called af_packet. 14 be called af_packet.
15 15
16 If unsure, say Y. 16 If unsure, say Y.
17
18config PACKET_MMAP
19 bool "Packet socket: mmapped IO"
20 depends on PACKET
21 help
22 If you say Y here, the Packet protocol driver will use an IO
23 mechanism that results in faster communication.
24
25 If unsure, say N.
26
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 020562164b56..1612d417d10c 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -80,6 +80,7 @@
80#include <linux/init.h> 80#include <linux/init.h>
81#include <linux/mutex.h> 81#include <linux/mutex.h>
82#include <linux/if_vlan.h> 82#include <linux/if_vlan.h>
83#include <linux/virtio_net.h>
83 84
84#ifdef CONFIG_INET 85#ifdef CONFIG_INET
85#include <net/inet_common.h> 86#include <net/inet_common.h>
@@ -156,7 +157,6 @@ struct packet_mreq_max {
156 unsigned char mr_address[MAX_ADDR_LEN]; 157 unsigned char mr_address[MAX_ADDR_LEN];
157}; 158};
158 159
159#ifdef CONFIG_PACKET_MMAP
160static int packet_set_ring(struct sock *sk, struct tpacket_req *req, 160static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
161 int closing, int tx_ring); 161 int closing, int tx_ring);
162 162
@@ -176,7 +176,6 @@ struct packet_ring_buffer {
176 176
177struct packet_sock; 177struct packet_sock;
178static int tpacket_snd(struct packet_sock *po, struct msghdr *msg); 178static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
179#endif
180 179
181static void packet_flush_mclist(struct sock *sk); 180static void packet_flush_mclist(struct sock *sk);
182 181
@@ -184,26 +183,23 @@ struct packet_sock {
184 /* struct sock has to be the first member of packet_sock */ 183 /* struct sock has to be the first member of packet_sock */
185 struct sock sk; 184 struct sock sk;
186 struct tpacket_stats stats; 185 struct tpacket_stats stats;
187#ifdef CONFIG_PACKET_MMAP
188 struct packet_ring_buffer rx_ring; 186 struct packet_ring_buffer rx_ring;
189 struct packet_ring_buffer tx_ring; 187 struct packet_ring_buffer tx_ring;
190 int copy_thresh; 188 int copy_thresh;
191#endif
192 spinlock_t bind_lock; 189 spinlock_t bind_lock;
193 struct mutex pg_vec_lock; 190 struct mutex pg_vec_lock;
194 unsigned int running:1, /* prot_hook is attached*/ 191 unsigned int running:1, /* prot_hook is attached*/
195 auxdata:1, 192 auxdata:1,
196 origdev:1; 193 origdev:1,
194 has_vnet_hdr:1;
197 int ifindex; /* bound device */ 195 int ifindex; /* bound device */
198 __be16 num; 196 __be16 num;
199 struct packet_mclist *mclist; 197 struct packet_mclist *mclist;
200#ifdef CONFIG_PACKET_MMAP
201 atomic_t mapped; 198 atomic_t mapped;
202 enum tpacket_versions tp_version; 199 enum tpacket_versions tp_version;
203 unsigned int tp_hdrlen; 200 unsigned int tp_hdrlen;
204 unsigned int tp_reserve; 201 unsigned int tp_reserve;
205 unsigned int tp_loss:1; 202 unsigned int tp_loss:1;
206#endif
207 struct packet_type prot_hook ____cacheline_aligned_in_smp; 203 struct packet_type prot_hook ____cacheline_aligned_in_smp;
208}; 204};
209 205
@@ -217,8 +213,6 @@ struct packet_skb_cb {
217 213
218#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) 214#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
219 215
220#ifdef CONFIG_PACKET_MMAP
221
222static void __packet_set_status(struct packet_sock *po, void *frame, int status) 216static void __packet_set_status(struct packet_sock *po, void *frame, int status)
223{ 217{
224 union { 218 union {
@@ -313,8 +307,6 @@ static inline void packet_increment_head(struct packet_ring_buffer *buff)
313 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0; 307 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
314} 308}
315 309
316#endif
317
318static inline struct packet_sock *pkt_sk(struct sock *sk) 310static inline struct packet_sock *pkt_sk(struct sock *sk)
319{ 311{
320 return (struct packet_sock *)sk; 312 return (struct packet_sock *)sk;
@@ -415,7 +407,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
415{ 407{
416 struct sock *sk = sock->sk; 408 struct sock *sk = sock->sk;
417 struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name; 409 struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
418 struct sk_buff *skb; 410 struct sk_buff *skb = NULL;
419 struct net_device *dev; 411 struct net_device *dev;
420 __be16 proto = 0; 412 __be16 proto = 0;
421 int err; 413 int err;
@@ -437,6 +429,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
437 */ 429 */
438 430
439 saddr->spkt_device[13] = 0; 431 saddr->spkt_device[13] = 0;
432retry:
440 rcu_read_lock(); 433 rcu_read_lock();
441 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device); 434 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
442 err = -ENODEV; 435 err = -ENODEV;
@@ -456,58 +449,48 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
456 if (len > dev->mtu + dev->hard_header_len) 449 if (len > dev->mtu + dev->hard_header_len)
457 goto out_unlock; 450 goto out_unlock;
458 451
459 err = -ENOBUFS; 452 if (!skb) {
460 skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL); 453 size_t reserved = LL_RESERVED_SPACE(dev);
461 454 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
462 /* 455
463 * If the write buffer is full, then tough. At this level the user 456 rcu_read_unlock();
464 * gets to deal with the problem - do your own algorithmic backoffs. 457 skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
465 * That's far more flexible. 458 if (skb == NULL)
466 */ 459 return -ENOBUFS;
467 460 /* FIXME: Save some space for broken drivers that write a hard
468 if (skb == NULL) 461 * header at transmission time by themselves. PPP is the notable
469 goto out_unlock; 462 * one here. This should really be fixed at the driver level.
470 463 */
471 /* 464 skb_reserve(skb, reserved);
472 * Fill it in 465 skb_reset_network_header(skb);
473 */ 466
474 467 /* Try to align data part correctly */
475 /* FIXME: Save some space for broken drivers that write a 468 if (hhlen) {
476 * hard header at transmission time by themselves. PPP is the 469 skb->data -= hhlen;
477 * notable one here. This should really be fixed at the driver level. 470 skb->tail -= hhlen;
478 */ 471 if (len < hhlen)
479 skb_reserve(skb, LL_RESERVED_SPACE(dev)); 472 skb_reset_network_header(skb);
480 skb_reset_network_header(skb); 473 }
481 474 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
482 /* Try to align data part correctly */ 475 if (err)
483 if (dev->header_ops) { 476 goto out_free;
484 skb->data -= dev->hard_header_len; 477 goto retry;
485 skb->tail -= dev->hard_header_len;
486 if (len < dev->hard_header_len)
487 skb_reset_network_header(skb);
488 } 478 }
489 479
490 /* Returns -EFAULT on error */ 480
491 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
492 skb->protocol = proto; 481 skb->protocol = proto;
493 skb->dev = dev; 482 skb->dev = dev;
494 skb->priority = sk->sk_priority; 483 skb->priority = sk->sk_priority;
495 skb->mark = sk->sk_mark; 484 skb->mark = sk->sk_mark;
496 if (err)
497 goto out_free;
498
499 /*
500 * Now send it
501 */
502 485
503 dev_queue_xmit(skb); 486 dev_queue_xmit(skb);
504 rcu_read_unlock(); 487 rcu_read_unlock();
505 return len; 488 return len;
506 489
507out_free:
508 kfree_skb(skb);
509out_unlock: 490out_unlock:
510 rcu_read_unlock(); 491 rcu_read_unlock();
492out_free:
493 kfree_skb(skb);
511 return err; 494 return err;
512} 495}
513 496
@@ -517,7 +500,7 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
517 struct sk_filter *filter; 500 struct sk_filter *filter;
518 501
519 rcu_read_lock_bh(); 502 rcu_read_lock_bh();
520 filter = rcu_dereference(sk->sk_filter); 503 filter = rcu_dereference_bh(sk->sk_filter);
521 if (filter != NULL) 504 if (filter != NULL)
522 res = sk_run_filter(skb, filter->insns, filter->len); 505 res = sk_run_filter(skb, filter->insns, filter->len);
523 rcu_read_unlock_bh(); 506 rcu_read_unlock_bh();
@@ -647,7 +630,6 @@ drop:
647 return 0; 630 return 0;
648} 631}
649 632
650#ifdef CONFIG_PACKET_MMAP
651static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, 633static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
652 struct packet_type *pt, struct net_device *orig_dev) 634 struct packet_type *pt, struct net_device *orig_dev)
653{ 635{
@@ -1030,8 +1012,20 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
1030 1012
1031 status = TP_STATUS_SEND_REQUEST; 1013 status = TP_STATUS_SEND_REQUEST;
1032 err = dev_queue_xmit(skb); 1014 err = dev_queue_xmit(skb);
1033 if (unlikely(err > 0 && (err = net_xmit_errno(err)) != 0)) 1015 if (unlikely(err > 0)) {
1034 goto out_xmit; 1016 err = net_xmit_errno(err);
1017 if (err && __packet_get_status(po, ph) ==
1018 TP_STATUS_AVAILABLE) {
1019 /* skb was destructed already */
1020 skb = NULL;
1021 goto out_status;
1022 }
1023 /*
1024 * skb was dropped but not destructed yet;
1025 * let's treat it like congestion or err < 0
1026 */
1027 err = 0;
1028 }
1035 packet_increment_head(&po->tx_ring); 1029 packet_increment_head(&po->tx_ring);
1036 len_sum += tp_len; 1030 len_sum += tp_len;
1037 } while (likely((ph != NULL) || 1031 } while (likely((ph != NULL) ||
@@ -1042,9 +1036,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
1042 err = len_sum; 1036 err = len_sum;
1043 goto out_put; 1037 goto out_put;
1044 1038
1045out_xmit:
1046 skb->destructor = sock_wfree;
1047 atomic_dec(&po->tx_ring.pending);
1048out_status: 1039out_status:
1049 __packet_set_status(po, ph, status); 1040 __packet_set_status(po, ph, status);
1050 kfree_skb(skb); 1041 kfree_skb(skb);
@@ -1054,7 +1045,30 @@ out:
1054 mutex_unlock(&po->pg_vec_lock); 1045 mutex_unlock(&po->pg_vec_lock);
1055 return err; 1046 return err;
1056} 1047}
1057#endif 1048
1049static inline struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
1050 size_t reserve, size_t len,
1051 size_t linear, int noblock,
1052 int *err)
1053{
1054 struct sk_buff *skb;
1055
1056 /* Under a page? Don't bother with paged skb. */
1057 if (prepad + len < PAGE_SIZE || !linear)
1058 linear = len;
1059
1060 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
1061 err);
1062 if (!skb)
1063 return NULL;
1064
1065 skb_reserve(skb, reserve);
1066 skb_put(skb, linear);
1067 skb->data_len = len - linear;
1068 skb->len += len - linear;
1069
1070 return skb;
1071}
1058 1072
1059static int packet_snd(struct socket *sock, 1073static int packet_snd(struct socket *sock,
1060 struct msghdr *msg, size_t len) 1074 struct msghdr *msg, size_t len)
@@ -1066,14 +1080,17 @@ static int packet_snd(struct socket *sock,
1066 __be16 proto; 1080 __be16 proto;
1067 unsigned char *addr; 1081 unsigned char *addr;
1068 int ifindex, err, reserve = 0; 1082 int ifindex, err, reserve = 0;
1083 struct virtio_net_hdr vnet_hdr = { 0 };
1084 int offset = 0;
1085 int vnet_hdr_len;
1086 struct packet_sock *po = pkt_sk(sk);
1087 unsigned short gso_type = 0;
1069 1088
1070 /* 1089 /*
1071 * Get and verify the address. 1090 * Get and verify the address.
1072 */ 1091 */
1073 1092
1074 if (saddr == NULL) { 1093 if (saddr == NULL) {
1075 struct packet_sock *po = pkt_sk(sk);
1076
1077 ifindex = po->ifindex; 1094 ifindex = po->ifindex;
1078 proto = po->num; 1095 proto = po->num;
1079 addr = NULL; 1096 addr = NULL;
@@ -1100,25 +1117,74 @@ static int packet_snd(struct socket *sock,
1100 if (!(dev->flags & IFF_UP)) 1117 if (!(dev->flags & IFF_UP))
1101 goto out_unlock; 1118 goto out_unlock;
1102 1119
1120 if (po->has_vnet_hdr) {
1121 vnet_hdr_len = sizeof(vnet_hdr);
1122
1123 err = -EINVAL;
1124 if (len < vnet_hdr_len)
1125 goto out_unlock;
1126
1127 len -= vnet_hdr_len;
1128
1129 err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
1130 vnet_hdr_len);
1131 if (err < 0)
1132 goto out_unlock;
1133
1134 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1135 (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
1136 vnet_hdr.hdr_len))
1137 vnet_hdr.hdr_len = vnet_hdr.csum_start +
1138 vnet_hdr.csum_offset + 2;
1139
1140 err = -EINVAL;
1141 if (vnet_hdr.hdr_len > len)
1142 goto out_unlock;
1143
1144 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1145 switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1146 case VIRTIO_NET_HDR_GSO_TCPV4:
1147 gso_type = SKB_GSO_TCPV4;
1148 break;
1149 case VIRTIO_NET_HDR_GSO_TCPV6:
1150 gso_type = SKB_GSO_TCPV6;
1151 break;
1152 case VIRTIO_NET_HDR_GSO_UDP:
1153 gso_type = SKB_GSO_UDP;
1154 break;
1155 default:
1156 goto out_unlock;
1157 }
1158
1159 if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
1160 gso_type |= SKB_GSO_TCP_ECN;
1161
1162 if (vnet_hdr.gso_size == 0)
1163 goto out_unlock;
1164
1165 }
1166 }
1167
1103 err = -EMSGSIZE; 1168 err = -EMSGSIZE;
1104 if (len > dev->mtu+reserve) 1169 if (!gso_type && (len > dev->mtu+reserve))
1105 goto out_unlock; 1170 goto out_unlock;
1106 1171
1107 skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev), 1172 err = -ENOBUFS;
1108 msg->msg_flags & MSG_DONTWAIT, &err); 1173 skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
1174 LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
1175 msg->msg_flags & MSG_DONTWAIT, &err);
1109 if (skb == NULL) 1176 if (skb == NULL)
1110 goto out_unlock; 1177 goto out_unlock;
1111 1178
1112 skb_reserve(skb, LL_RESERVED_SPACE(dev)); 1179 skb_set_network_header(skb, reserve);
1113 skb_reset_network_header(skb);
1114 1180
1115 err = -EINVAL; 1181 err = -EINVAL;
1116 if (sock->type == SOCK_DGRAM && 1182 if (sock->type == SOCK_DGRAM &&
1117 dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len) < 0) 1183 (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
1118 goto out_free; 1184 goto out_free;
1119 1185
1120 /* Returns -EFAULT on error */ 1186 /* Returns -EFAULT on error */
1121 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); 1187 err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
1122 if (err) 1188 if (err)
1123 goto out_free; 1189 goto out_free;
1124 1190
@@ -1127,6 +1193,25 @@ static int packet_snd(struct socket *sock,
1127 skb->priority = sk->sk_priority; 1193 skb->priority = sk->sk_priority;
1128 skb->mark = sk->sk_mark; 1194 skb->mark = sk->sk_mark;
1129 1195
1196 if (po->has_vnet_hdr) {
1197 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1198 if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
1199 vnet_hdr.csum_offset)) {
1200 err = -EINVAL;
1201 goto out_free;
1202 }
1203 }
1204
1205 skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
1206 skb_shinfo(skb)->gso_type = gso_type;
1207
1208 /* Header must be checked, and gso_segs computed. */
1209 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1210 skb_shinfo(skb)->gso_segs = 0;
1211
1212 len += vnet_hdr_len;
1213 }
1214
1130 /* 1215 /*
1131 * Now send it 1216 * Now send it
1132 */ 1217 */
@@ -1151,13 +1236,11 @@ out:
1151static int packet_sendmsg(struct kiocb *iocb, struct socket *sock, 1236static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
1152 struct msghdr *msg, size_t len) 1237 struct msghdr *msg, size_t len)
1153{ 1238{
1154#ifdef CONFIG_PACKET_MMAP
1155 struct sock *sk = sock->sk; 1239 struct sock *sk = sock->sk;
1156 struct packet_sock *po = pkt_sk(sk); 1240 struct packet_sock *po = pkt_sk(sk);
1157 if (po->tx_ring.pg_vec) 1241 if (po->tx_ring.pg_vec)
1158 return tpacket_snd(po, msg); 1242 return tpacket_snd(po, msg);
1159 else 1243 else
1160#endif
1161 return packet_snd(sock, msg, len); 1244 return packet_snd(sock, msg, len);
1162} 1245}
1163 1246
@@ -1171,9 +1254,7 @@ static int packet_release(struct socket *sock)
1171 struct sock *sk = sock->sk; 1254 struct sock *sk = sock->sk;
1172 struct packet_sock *po; 1255 struct packet_sock *po;
1173 struct net *net; 1256 struct net *net;
1174#ifdef CONFIG_PACKET_MMAP
1175 struct tpacket_req req; 1257 struct tpacket_req req;
1176#endif
1177 1258
1178 if (!sk) 1259 if (!sk)
1179 return 0; 1260 return 0;
@@ -1181,28 +1262,25 @@ static int packet_release(struct socket *sock)
1181 net = sock_net(sk); 1262 net = sock_net(sk);
1182 po = pkt_sk(sk); 1263 po = pkt_sk(sk);
1183 1264
1184 write_lock_bh(&net->packet.sklist_lock); 1265 spin_lock_bh(&net->packet.sklist_lock);
1185 sk_del_node_init(sk); 1266 sk_del_node_init_rcu(sk);
1186 sock_prot_inuse_add(net, sk->sk_prot, -1); 1267 sock_prot_inuse_add(net, sk->sk_prot, -1);
1187 write_unlock_bh(&net->packet.sklist_lock); 1268 spin_unlock_bh(&net->packet.sklist_lock);
1188
1189 /*
1190 * Unhook packet receive handler.
1191 */
1192 1269
1270 spin_lock(&po->bind_lock);
1193 if (po->running) { 1271 if (po->running) {
1194 /* 1272 /*
1195 * Remove the protocol hook 1273 * Remove from protocol table
1196 */ 1274 */
1197 dev_remove_pack(&po->prot_hook);
1198 po->running = 0; 1275 po->running = 0;
1199 po->num = 0; 1276 po->num = 0;
1277 __dev_remove_pack(&po->prot_hook);
1200 __sock_put(sk); 1278 __sock_put(sk);
1201 } 1279 }
1280 spin_unlock(&po->bind_lock);
1202 1281
1203 packet_flush_mclist(sk); 1282 packet_flush_mclist(sk);
1204 1283
1205#ifdef CONFIG_PACKET_MMAP
1206 memset(&req, 0, sizeof(req)); 1284 memset(&req, 0, sizeof(req));
1207 1285
1208 if (po->rx_ring.pg_vec) 1286 if (po->rx_ring.pg_vec)
@@ -1210,12 +1288,11 @@ static int packet_release(struct socket *sock)
1210 1288
1211 if (po->tx_ring.pg_vec) 1289 if (po->tx_ring.pg_vec)
1212 packet_set_ring(sk, &req, 1, 1); 1290 packet_set_ring(sk, &req, 1, 1);
1213#endif
1214 1291
1292 synchronize_net();
1215 /* 1293 /*
1216 * Now the socket is dead. No more input will appear. 1294 * Now the socket is dead. No more input will appear.
1217 */ 1295 */
1218
1219 sock_orphan(sk); 1296 sock_orphan(sk);
1220 sock->sk = NULL; 1297 sock->sk = NULL;
1221 1298
@@ -1399,10 +1476,11 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
1399 po->running = 1; 1476 po->running = 1;
1400 } 1477 }
1401 1478
1402 write_lock_bh(&net->packet.sklist_lock); 1479 spin_lock_bh(&net->packet.sklist_lock);
1403 sk_add_node(sk, &net->packet.sklist); 1480 sk_add_node_rcu(sk, &net->packet.sklist);
1404 sock_prot_inuse_add(net, &packet_proto, 1); 1481 sock_prot_inuse_add(net, &packet_proto, 1);
1405 write_unlock_bh(&net->packet.sklist_lock); 1482 spin_unlock_bh(&net->packet.sklist_lock);
1483
1406 return 0; 1484 return 0;
1407out: 1485out:
1408 return err; 1486 return err;
@@ -1420,6 +1498,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1420 struct sk_buff *skb; 1498 struct sk_buff *skb;
1421 int copied, err; 1499 int copied, err;
1422 struct sockaddr_ll *sll; 1500 struct sockaddr_ll *sll;
1501 int vnet_hdr_len = 0;
1423 1502
1424 err = -EINVAL; 1503 err = -EINVAL;
1425 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT)) 1504 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
@@ -1451,6 +1530,48 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1451 if (skb == NULL) 1530 if (skb == NULL)
1452 goto out; 1531 goto out;
1453 1532
1533 if (pkt_sk(sk)->has_vnet_hdr) {
1534 struct virtio_net_hdr vnet_hdr = { 0 };
1535
1536 err = -EINVAL;
1537 vnet_hdr_len = sizeof(vnet_hdr);
1538 if ((len -= vnet_hdr_len) < 0)
1539 goto out_free;
1540
1541 if (skb_is_gso(skb)) {
1542 struct skb_shared_info *sinfo = skb_shinfo(skb);
1543
1544 /* This is a hint as to how much should be linear. */
1545 vnet_hdr.hdr_len = skb_headlen(skb);
1546 vnet_hdr.gso_size = sinfo->gso_size;
1547 if (sinfo->gso_type & SKB_GSO_TCPV4)
1548 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1549 else if (sinfo->gso_type & SKB_GSO_TCPV6)
1550 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1551 else if (sinfo->gso_type & SKB_GSO_UDP)
1552 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
1553 else if (sinfo->gso_type & SKB_GSO_FCOE)
1554 goto out_free;
1555 else
1556 BUG();
1557 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
1558 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
1559 } else
1560 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
1561
1562 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1563 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1564 vnet_hdr.csum_start = skb->csum_start -
1565 skb_headroom(skb);
1566 vnet_hdr.csum_offset = skb->csum_offset;
1567 } /* else everything is zero */
1568
1569 err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
1570 vnet_hdr_len);
1571 if (err < 0)
1572 goto out_free;
1573 }
1574
1454 /* 1575 /*
1455 * If the address length field is there to be filled in, we fill 1576 * If the address length field is there to be filled in, we fill
1456 * it in now. 1577 * it in now.
@@ -1502,7 +1623,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1502 * Free or return the buffer as appropriate. Again this 1623 * Free or return the buffer as appropriate. Again this
1503 * hides all the races and re-entrancy issues from us. 1624 * hides all the races and re-entrancy issues from us.
1504 */ 1625 */
1505 err = (flags&MSG_TRUNC) ? skb->len : copied; 1626 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
1506 1627
1507out_free: 1628out_free:
1508 skb_free_datagram(sk, skb); 1629 skb_free_datagram(sk, skb);
@@ -1567,6 +1688,8 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
1567{ 1688{
1568 switch (i->type) { 1689 switch (i->type) {
1569 case PACKET_MR_MULTICAST: 1690 case PACKET_MR_MULTICAST:
1691 if (i->alen != dev->addr_len)
1692 return -EINVAL;
1570 if (what > 0) 1693 if (what > 0)
1571 return dev_mc_add(dev, i->addr, i->alen, 0); 1694 return dev_mc_add(dev, i->addr, i->alen, 0);
1572 else 1695 else
@@ -1579,6 +1702,8 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
1579 return dev_set_allmulti(dev, what); 1702 return dev_set_allmulti(dev, what);
1580 break; 1703 break;
1581 case PACKET_MR_UNICAST: 1704 case PACKET_MR_UNICAST:
1705 if (i->alen != dev->addr_len)
1706 return -EINVAL;
1582 if (what > 0) 1707 if (what > 0)
1583 return dev_unicast_add(dev, i->addr); 1708 return dev_unicast_add(dev, i->addr);
1584 else 1709 else
@@ -1732,7 +1857,6 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1732 return ret; 1857 return ret;
1733 } 1858 }
1734 1859
1735#ifdef CONFIG_PACKET_MMAP
1736 case PACKET_RX_RING: 1860 case PACKET_RX_RING:
1737 case PACKET_TX_RING: 1861 case PACKET_TX_RING:
1738 { 1862 {
@@ -1740,6 +1864,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1740 1864
1741 if (optlen < sizeof(req)) 1865 if (optlen < sizeof(req))
1742 return -EINVAL; 1866 return -EINVAL;
1867 if (pkt_sk(sk)->has_vnet_hdr)
1868 return -EINVAL;
1743 if (copy_from_user(&req, optval, sizeof(req))) 1869 if (copy_from_user(&req, optval, sizeof(req)))
1744 return -EFAULT; 1870 return -EFAULT;
1745 return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING); 1871 return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
@@ -1801,7 +1927,6 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1801 po->tp_loss = !!val; 1927 po->tp_loss = !!val;
1802 return 0; 1928 return 0;
1803 } 1929 }
1804#endif
1805 case PACKET_AUXDATA: 1930 case PACKET_AUXDATA:
1806 { 1931 {
1807 int val; 1932 int val;
@@ -1826,6 +1951,22 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1826 po->origdev = !!val; 1951 po->origdev = !!val;
1827 return 0; 1952 return 0;
1828 } 1953 }
1954 case PACKET_VNET_HDR:
1955 {
1956 int val;
1957
1958 if (sock->type != SOCK_RAW)
1959 return -EINVAL;
1960 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
1961 return -EBUSY;
1962 if (optlen < sizeof(val))
1963 return -EINVAL;
1964 if (copy_from_user(&val, optval, sizeof(val)))
1965 return -EFAULT;
1966
1967 po->has_vnet_hdr = !!val;
1968 return 0;
1969 }
1829 default: 1970 default:
1830 return -ENOPROTOOPT; 1971 return -ENOPROTOOPT;
1831 } 1972 }
@@ -1876,7 +2017,13 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
1876 2017
1877 data = &val; 2018 data = &val;
1878 break; 2019 break;
1879#ifdef CONFIG_PACKET_MMAP 2020 case PACKET_VNET_HDR:
2021 if (len > sizeof(int))
2022 len = sizeof(int);
2023 val = po->has_vnet_hdr;
2024
2025 data = &val;
2026 break;
1880 case PACKET_VERSION: 2027 case PACKET_VERSION:
1881 if (len > sizeof(int)) 2028 if (len > sizeof(int))
1882 len = sizeof(int); 2029 len = sizeof(int);
@@ -1912,7 +2059,6 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
1912 val = po->tp_loss; 2059 val = po->tp_loss;
1913 data = &val; 2060 data = &val;
1914 break; 2061 break;
1915#endif
1916 default: 2062 default:
1917 return -ENOPROTOOPT; 2063 return -ENOPROTOOPT;
1918 } 2064 }
@@ -1932,8 +2078,8 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void
1932 struct net_device *dev = data; 2078 struct net_device *dev = data;
1933 struct net *net = dev_net(dev); 2079 struct net *net = dev_net(dev);
1934 2080
1935 read_lock(&net->packet.sklist_lock); 2081 rcu_read_lock();
1936 sk_for_each(sk, node, &net->packet.sklist) { 2082 sk_for_each_rcu(sk, node, &net->packet.sklist) {
1937 struct packet_sock *po = pkt_sk(sk); 2083 struct packet_sock *po = pkt_sk(sk);
1938 2084
1939 switch (msg) { 2085 switch (msg) {
@@ -1961,18 +2107,19 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void
1961 } 2107 }
1962 break; 2108 break;
1963 case NETDEV_UP: 2109 case NETDEV_UP:
1964 spin_lock(&po->bind_lock); 2110 if (dev->ifindex == po->ifindex) {
1965 if (dev->ifindex == po->ifindex && po->num && 2111 spin_lock(&po->bind_lock);
1966 !po->running) { 2112 if (po->num && !po->running) {
1967 dev_add_pack(&po->prot_hook); 2113 dev_add_pack(&po->prot_hook);
1968 sock_hold(sk); 2114 sock_hold(sk);
1969 po->running = 1; 2115 po->running = 1;
2116 }
2117 spin_unlock(&po->bind_lock);
1970 } 2118 }
1971 spin_unlock(&po->bind_lock);
1972 break; 2119 break;
1973 } 2120 }
1974 } 2121 }
1975 read_unlock(&net->packet.sklist_lock); 2122 rcu_read_unlock();
1976 return NOTIFY_DONE; 2123 return NOTIFY_DONE;
1977} 2124}
1978 2125
@@ -2032,11 +2179,6 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
2032 return 0; 2179 return 0;
2033} 2180}
2034 2181
2035#ifndef CONFIG_PACKET_MMAP
2036#define packet_mmap sock_no_mmap
2037#define packet_poll datagram_poll
2038#else
2039
2040static unsigned int packet_poll(struct file *file, struct socket *sock, 2182static unsigned int packet_poll(struct file *file, struct socket *sock,
2041 poll_table *wait) 2183 poll_table *wait)
2042{ 2184{
@@ -2318,8 +2460,6 @@ out:
2318 mutex_unlock(&po->pg_vec_lock); 2460 mutex_unlock(&po->pg_vec_lock);
2319 return err; 2461 return err;
2320} 2462}
2321#endif
2322
2323 2463
2324static const struct proto_ops packet_ops_spkt = { 2464static const struct proto_ops packet_ops_spkt = {
2325 .family = PF_PACKET, 2465 .family = PF_PACKET,
@@ -2374,40 +2514,26 @@ static struct notifier_block packet_netdev_notifier = {
2374}; 2514};
2375 2515
2376#ifdef CONFIG_PROC_FS 2516#ifdef CONFIG_PROC_FS
2377static inline struct sock *packet_seq_idx(struct net *net, loff_t off)
2378{
2379 struct sock *s;
2380 struct hlist_node *node;
2381
2382 sk_for_each(s, node, &net->packet.sklist) {
2383 if (!off--)
2384 return s;
2385 }
2386 return NULL;
2387}
2388 2517
2389static void *packet_seq_start(struct seq_file *seq, loff_t *pos) 2518static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
2390 __acquires(seq_file_net(seq)->packet.sklist_lock) 2519 __acquires(RCU)
2391{ 2520{
2392 struct net *net = seq_file_net(seq); 2521 struct net *net = seq_file_net(seq);
2393 read_lock(&net->packet.sklist_lock); 2522
2394 return *pos ? packet_seq_idx(net, *pos - 1) : SEQ_START_TOKEN; 2523 rcu_read_lock();
2524 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
2395} 2525}
2396 2526
2397static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2527static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2398{ 2528{
2399 struct net *net = seq_file_net(seq); 2529 struct net *net = seq_file_net(seq);
2400 ++*pos; 2530 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
2401 return (v == SEQ_START_TOKEN)
2402 ? sk_head(&net->packet.sklist)
2403 : sk_next((struct sock *)v) ;
2404} 2531}
2405 2532
2406static void packet_seq_stop(struct seq_file *seq, void *v) 2533static void packet_seq_stop(struct seq_file *seq, void *v)
2407 __releases(seq_file_net(seq)->packet.sklist_lock) 2534 __releases(RCU)
2408{ 2535{
2409 struct net *net = seq_file_net(seq); 2536 rcu_read_unlock();
2410 read_unlock(&net->packet.sklist_lock);
2411} 2537}
2412 2538
2413static int packet_seq_show(struct seq_file *seq, void *v) 2539static int packet_seq_show(struct seq_file *seq, void *v)
@@ -2415,7 +2541,7 @@ static int packet_seq_show(struct seq_file *seq, void *v)
2415 if (v == SEQ_START_TOKEN) 2541 if (v == SEQ_START_TOKEN)
2416 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n"); 2542 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
2417 else { 2543 else {
2418 struct sock *s = v; 2544 struct sock *s = sk_entry(v);
2419 const struct packet_sock *po = pkt_sk(s); 2545 const struct packet_sock *po = pkt_sk(s);
2420 2546
2421 seq_printf(seq, 2547 seq_printf(seq,
@@ -2457,9 +2583,9 @@ static const struct file_operations packet_seq_fops = {
2457 2583
2458#endif 2584#endif
2459 2585
2460static int packet_net_init(struct net *net) 2586static int __net_init packet_net_init(struct net *net)
2461{ 2587{
2462 rwlock_init(&net->packet.sklist_lock); 2588 spin_lock_init(&net->packet.sklist_lock);
2463 INIT_HLIST_HEAD(&net->packet.sklist); 2589 INIT_HLIST_HEAD(&net->packet.sklist);
2464 2590
2465 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops)) 2591 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
@@ -2468,7 +2594,7 @@ static int packet_net_init(struct net *net)
2468 return 0; 2594 return 0;
2469} 2595}
2470 2596
2471static void packet_net_exit(struct net *net) 2597static void __net_exit packet_net_exit(struct net *net)
2472{ 2598{
2473 proc_net_remove(net, "packet"); 2599 proc_net_remove(net, "packet");
2474} 2600}
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index 67f072e94d00..387197b579b1 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -75,7 +75,8 @@ static int pn_sendmsg(struct kiocb *iocb, struct sock *sk,
75 struct sk_buff *skb; 75 struct sk_buff *skb;
76 int err; 76 int err;
77 77
78 if (msg->msg_flags & MSG_OOB) 78 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
79 MSG_CMSG_COMPAT))
79 return -EOPNOTSUPP; 80 return -EOPNOTSUPP;
80 81
81 if (msg->msg_name == NULL) 82 if (msg->msg_name == NULL)
@@ -119,7 +120,8 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
119 int rval = -EOPNOTSUPP; 120 int rval = -EOPNOTSUPP;
120 int copylen; 121 int copylen;
121 122
122 if (flags & MSG_OOB) 123 if (flags & ~(MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_NOSIGNAL|
124 MSG_CMSG_COMPAT))
123 goto out_nofree; 125 goto out_nofree;
124 126
125 if (addr_len) 127 if (addr_len)
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index d183509d3fa6..d01208968c83 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -96,11 +96,11 @@ static int gprs_recv(struct gprs_dev *gp, struct sk_buff *skb)
96 goto drop; 96 goto drop;
97 } 97 }
98 98
99 if (likely(skb_headroom(skb) & 3)) { 99 if (skb_headroom(skb) & 3) {
100 struct sk_buff *rskb, *fs; 100 struct sk_buff *rskb, *fs;
101 int flen = 0; 101 int flen = 0;
102 102
103 /* Phonet Pipe data header is misaligned (3 bytes), 103 /* Phonet Pipe data header may be misaligned (3 bytes),
104 * so wrap the IP packet as a single fragment of an head-less 104 * so wrap the IP packet as a single fragment of an head-less
105 * socket buffer. The network stack will pull what it needs, 105 * socket buffer. The network stack will pull what it needs,
106 * but at least, the whole IP payload is not memcpy'd. */ 106 * but at least, the whole IP payload is not memcpy'd. */
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index b6356f3832f6..360cf377693e 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -354,6 +354,9 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
354 queue = &pn->ctrlreq_queue; 354 queue = &pn->ctrlreq_queue;
355 goto queue; 355 goto queue;
356 356
357 case PNS_PIPE_ALIGNED_DATA:
358 __skb_pull(skb, 1);
359 /* fall through */
357 case PNS_PIPE_DATA: 360 case PNS_PIPE_DATA:
358 __skb_pull(skb, 3); /* Pipe data header */ 361 __skb_pull(skb, 3); /* Pipe data header */
359 if (!pn_flow_safe(pn->rx_fc)) { 362 if (!pn_flow_safe(pn->rx_fc)) {
@@ -441,6 +444,7 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
441 struct sockaddr_pn dst; 444 struct sockaddr_pn dst;
442 u16 peer_type; 445 u16 peer_type;
443 u8 pipe_handle, enabled, n_sb; 446 u8 pipe_handle, enabled, n_sb;
447 u8 aligned = 0;
444 448
445 if (!pskb_pull(skb, sizeof(*hdr) + 4)) 449 if (!pskb_pull(skb, sizeof(*hdr) + 4))
446 return -EINVAL; 450 return -EINVAL;
@@ -479,6 +483,9 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
479 return -EINVAL; 483 return -EINVAL;
480 peer_type = (peer_type & 0xff00) | data[0]; 484 peer_type = (peer_type & 0xff00) | data[0];
481 break; 485 break;
486 case PN_PIPE_SB_ALIGNED_DATA:
487 aligned = data[0] != 0;
488 break;
482 } 489 }
483 n_sb--; 490 n_sb--;
484 } 491 }
@@ -510,6 +517,7 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
510 newpn->rx_credits = 0; 517 newpn->rx_credits = 0;
511 newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL; 518 newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
512 newpn->init_enable = enabled; 519 newpn->init_enable = enabled;
520 newpn->aligned = aligned;
513 521
514 BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue)); 522 BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue));
515 skb_queue_head(&newsk->sk_receive_queue, skb); 523 skb_queue_head(&newsk->sk_receive_queue, skb);
@@ -829,11 +837,15 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
829 return -ENOBUFS; 837 return -ENOBUFS;
830 } 838 }
831 839
832 skb_push(skb, 3); 840 skb_push(skb, 3 + pn->aligned);
833 skb_reset_transport_header(skb); 841 skb_reset_transport_header(skb);
834 ph = pnp_hdr(skb); 842 ph = pnp_hdr(skb);
835 ph->utid = 0; 843 ph->utid = 0;
836 ph->message_id = PNS_PIPE_DATA; 844 if (pn->aligned) {
845 ph->message_id = PNS_PIPE_ALIGNED_DATA;
846 ph->data[0] = 0; /* padding */
847 } else
848 ph->message_id = PNS_PIPE_DATA;
837 ph->pipe_handle = pn->pipe_handle; 849 ph->pipe_handle = pn->pipe_handle;
838 850
839 return pn_skb_send(sk, skb, &pipe_srv); 851 return pn_skb_send(sk, skb, &pipe_srv);
@@ -848,7 +860,9 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
848 int flags = msg->msg_flags; 860 int flags = msg->msg_flags;
849 int err, done; 861 int err, done;
850 862
851 if (msg->msg_flags & MSG_OOB || !(msg->msg_flags & MSG_EOR)) 863 if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
864 MSG_CMSG_COMPAT)) ||
865 !(msg->msg_flags & MSG_EOR))
852 return -EOPNOTSUPP; 866 return -EOPNOTSUPP;
853 867
854 skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len, 868 skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
@@ -927,6 +941,9 @@ int pep_write(struct sock *sk, struct sk_buff *skb)
927 struct sk_buff *rskb, *fs; 941 struct sk_buff *rskb, *fs;
928 int flen = 0; 942 int flen = 0;
929 943
944 if (pep_sk(sk)->aligned)
945 return pipe_skb_send(sk, skb);
946
930 rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC); 947 rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
931 if (!rskb) { 948 if (!rskb) {
932 kfree_skb(skb); 949 kfree_skb(skb);
@@ -966,6 +983,10 @@ static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
966 struct sk_buff *skb; 983 struct sk_buff *skb;
967 int err; 984 int err;
968 985
986 if (flags & ~(MSG_OOB|MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_WAITALL|
987 MSG_NOSIGNAL|MSG_CMSG_COMPAT))
988 return -EOPNOTSUPP;
989
969 if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE))) 990 if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE)))
970 return -ENOTCONN; 991 return -ENOTCONN;
971 992
@@ -973,6 +994,8 @@ static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
973 /* Dequeue and acknowledge control request */ 994 /* Dequeue and acknowledge control request */
974 struct pep_sock *pn = pep_sk(sk); 995 struct pep_sock *pn = pep_sk(sk);
975 996
997 if (flags & MSG_PEEK)
998 return -EOPNOTSUPP;
976 skb = skb_dequeue(&pn->ctrlreq_queue); 999 skb = skb_dequeue(&pn->ctrlreq_queue);
977 if (skb) { 1000 if (skb) {
978 pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR, 1001 pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR,
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index bc4a33bf2d3d..5c6ae0c701c0 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -107,8 +107,7 @@ static void phonet_device_destroy(struct net_device *dev)
107 if (pnd) { 107 if (pnd) {
108 u8 addr; 108 u8 addr;
109 109
110 for (addr = find_first_bit(pnd->addrs, 64); addr < 64; 110 for_each_set_bit(addr, pnd->addrs, 64)
111 addr = find_next_bit(pnd->addrs, 64, 1+addr))
112 phonet_address_notify(RTM_DELADDR, dev, addr); 111 phonet_address_notify(RTM_DELADDR, dev, addr);
113 kfree(pnd); 112 kfree(pnd);
114 } 113 }
@@ -311,7 +310,7 @@ static struct notifier_block phonet_device_notifier = {
311}; 310};
312 311
313/* Per-namespace Phonet devices handling */ 312/* Per-namespace Phonet devices handling */
314static int phonet_init_net(struct net *net) 313static int __net_init phonet_init_net(struct net *net)
315{ 314{
316 struct phonet_net *pnn = net_generic(net, phonet_net_id); 315 struct phonet_net *pnn = net_generic(net, phonet_net_id);
317 316
@@ -324,7 +323,7 @@ static int phonet_init_net(struct net *net)
324 return 0; 323 return 0;
325} 324}
326 325
327static void phonet_exit_net(struct net *net) 326static void __net_exit phonet_exit_net(struct net *net)
328{ 327{
329 struct phonet_net *pnn = net_generic(net, phonet_net_id); 328 struct phonet_net *pnn = net_generic(net, phonet_net_id);
330 struct net_device *dev; 329 struct net_device *dev;
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index 2e6c7eb8e76a..fe2e7088ee07 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -141,8 +141,7 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
141 continue; 141 continue;
142 142
143 addr_idx = 0; 143 addr_idx = 0;
144 for (addr = find_first_bit(pnd->addrs, 64); addr < 64; 144 for_each_set_bit(addr, pnd->addrs, 64) {
145 addr = find_next_bit(pnd->addrs, 64, 1+addr)) {
146 if (addr_idx++ < addr_start_idx) 145 if (addr_idx++ < addr_start_idx)
147 continue; 146 continue;
148 147
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 536ebe5d3f6b..3b8992361042 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -182,8 +182,8 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
182 ic = conn->c_transport_data; 182 ic = conn->c_transport_data;
183 dev_addr = &ic->i_cm_id->route.addr.dev_addr; 183 dev_addr = &ic->i_cm_id->route.addr.dev_addr;
184 184
185 ib_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); 185 rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
186 ib_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); 186 rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
187 187
188 rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client); 188 rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
189 iinfo->max_send_wr = ic->i_send_ring.w_nr; 189 iinfo->max_send_wr = ic->i_send_ring.w_nr;
diff --git a/net/rds/iw.c b/net/rds/iw.c
index db224f7c2937..b28fa8525b24 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -184,8 +184,8 @@ static int rds_iw_conn_info_visitor(struct rds_connection *conn,
184 ic = conn->c_transport_data; 184 ic = conn->c_transport_data;
185 dev_addr = &ic->i_cm_id->route.addr.dev_addr; 185 dev_addr = &ic->i_cm_id->route.addr.dev_addr;
186 186
187 ib_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); 187 rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
188 ib_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); 188 rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
189 189
190 rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client); 190 rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
191 iinfo->max_send_wr = ic->i_send_ring.w_nr; 191 iinfo->max_send_wr = ic->i_send_ring.w_nr;
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 211522f9a9a2..056256285987 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -90,8 +90,8 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
90 90
91 ret = sock->ops->bind(sock, (struct sockaddr *)&src, sizeof(src)); 91 ret = sock->ops->bind(sock, (struct sockaddr *)&src, sizeof(src));
92 if (ret) { 92 if (ret) {
93 rdsdebug("bind failed with %d at address %u.%u.%u.%u\n", 93 rdsdebug("bind failed with %d at address %pI4\n",
94 ret, NIPQUAD(conn->c_laddr)); 94 ret, &conn->c_laddr);
95 goto out; 95 goto out;
96 } 96 }
97 97
@@ -108,8 +108,7 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
108 O_NONBLOCK); 108 O_NONBLOCK);
109 sock = NULL; 109 sock = NULL;
110 110
111 rdsdebug("connect to address %u.%u.%u.%u returned %d\n", 111 rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret);
112 NIPQUAD(conn->c_faddr), ret);
113 if (ret == -EINPROGRESS) 112 if (ret == -EINPROGRESS)
114 ret = 0; 113 ret = 0;
115 114
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 45474a436862..53cb1b54165d 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -66,9 +66,9 @@ static int rds_tcp_accept_one(struct socket *sock)
66 66
67 inet = inet_sk(new_sock->sk); 67 inet = inet_sk(new_sock->sk);
68 68
69 rdsdebug("accepted tcp %u.%u.%u.%u:%u -> %u.%u.%u.%u:%u\n", 69 rdsdebug("accepted tcp %pI4:%u -> %pI4:%u\n",
70 NIPQUAD(inet->inet_saddr), ntohs(inet->inet_sport), 70 &inet->inet_saddr, ntohs(inet->inet_sport),
71 NIPQUAD(inet->inet_daddr), ntohs(inet->inet_dport)); 71 &inet->inet_daddr, ntohs(inet->inet_dport));
72 72
73 conn = rds_conn_create(inet->inet_saddr, inet->inet_daddr, 73 conn = rds_conn_create(inet->inet_saddr, inet->inet_daddr,
74 &rds_tcp_transport, GFP_KERNEL); 74 &rds_tcp_transport, GFP_KERNEL);
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index ab545e0cd5d6..34fdcc059e54 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -193,9 +193,9 @@ out:
193 rds_tcp_stats_inc(s_tcp_sndbuf_full); 193 rds_tcp_stats_inc(s_tcp_sndbuf_full);
194 ret = 0; 194 ret = 0;
195 } else { 195 } else {
196 printk(KERN_WARNING "RDS/tcp: send to %u.%u.%u.%u " 196 printk(KERN_WARNING "RDS/tcp: send to %pI4 "
197 "returned %d, disconnecting and reconnecting\n", 197 "returned %d, disconnecting and reconnecting\n",
198 NIPQUAD(conn->c_faddr), ret); 198 &conn->c_faddr, ret);
199 rds_conn_drop(conn); 199 rds_conn_drop(conn);
200 } 200 }
201 } 201 }
diff --git a/net/rfkill/input.c b/net/rfkill/input.c
index a7295ad5f9cb..3713d7ecab96 100644
--- a/net/rfkill/input.c
+++ b/net/rfkill/input.c
@@ -212,6 +212,9 @@ static void rfkill_event(struct input_handle *handle, unsigned int type,
212 case KEY_WIMAX: 212 case KEY_WIMAX:
213 rfkill_schedule_toggle(RFKILL_TYPE_WIMAX); 213 rfkill_schedule_toggle(RFKILL_TYPE_WIMAX);
214 break; 214 break;
215 case KEY_RFKILL:
216 rfkill_schedule_toggle(RFKILL_TYPE_ALL);
217 break;
215 } 218 }
216 } else if (type == EV_SW && code == SW_RFKILL_ALL) 219 } else if (type == EV_SW && code == SW_RFKILL_ALL)
217 rfkill_schedule_evsw_rfkillall(data); 220 rfkill_schedule_evsw_rfkillall(data);
@@ -295,6 +298,11 @@ static const struct input_device_id rfkill_ids[] = {
295 .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) }, 298 .keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
296 }, 299 },
297 { 300 {
301 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
302 .evbit = { BIT_MASK(EV_KEY) },
303 .keybit = { [BIT_WORD(KEY_RFKILL)] = BIT_MASK(KEY_RFKILL) },
304 },
305 {
298 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT, 306 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
299 .evbit = { BIT(EV_SW) }, 307 .evbit = { BIT(EV_SW) },
300 .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) }, 308 .swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 8feb9e5d6623..e90b9b6c16ae 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1404,29 +1404,13 @@ static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1404static void *rose_info_start(struct seq_file *seq, loff_t *pos) 1404static void *rose_info_start(struct seq_file *seq, loff_t *pos)
1405 __acquires(rose_list_lock) 1405 __acquires(rose_list_lock)
1406{ 1406{
1407 int i;
1408 struct sock *s;
1409 struct hlist_node *node;
1410
1411 spin_lock_bh(&rose_list_lock); 1407 spin_lock_bh(&rose_list_lock);
1412 if (*pos == 0) 1408 return seq_hlist_start_head(&rose_list, *pos);
1413 return SEQ_START_TOKEN;
1414
1415 i = 1;
1416 sk_for_each(s, node, &rose_list) {
1417 if (i == *pos)
1418 return s;
1419 ++i;
1420 }
1421 return NULL;
1422} 1409}
1423 1410
1424static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos) 1411static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
1425{ 1412{
1426 ++*pos; 1413 return seq_hlist_next(v, &rose_list, pos);
1427
1428 return (v == SEQ_START_TOKEN) ? sk_head(&rose_list)
1429 : sk_next((struct sock *)v);
1430} 1414}
1431 1415
1432static void rose_info_stop(struct seq_file *seq, void *v) 1416static void rose_info_stop(struct seq_file *seq, void *v)
@@ -1444,7 +1428,7 @@ static int rose_info_show(struct seq_file *seq, void *v)
1444 "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n"); 1428 "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");
1445 1429
1446 else { 1430 else {
1447 struct sock *s = v; 1431 struct sock *s = sk_entry(v);
1448 struct rose_sock *rose = rose_sk(s); 1432 struct rose_sock *rose = rose_sk(s);
1449 const char *devname, *callsign; 1433 const char *devname, *callsign;
1450 const struct net_device *dev = rose->device; 1434 const struct net_device *dev = rose->device;
diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
index bd86a63960ce..5ef5f6988a2e 100644
--- a/net/rose/rose_link.c
+++ b/net/rose/rose_link.c
@@ -101,13 +101,17 @@ static void rose_t0timer_expiry(unsigned long param)
101static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh) 101static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
102{ 102{
103 ax25_address *rose_call; 103 ax25_address *rose_call;
104 ax25_cb *ax25s;
104 105
105 if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) 106 if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
106 rose_call = (ax25_address *)neigh->dev->dev_addr; 107 rose_call = (ax25_address *)neigh->dev->dev_addr;
107 else 108 else
108 rose_call = &rose_callsign; 109 rose_call = &rose_callsign;
109 110
111 ax25s = neigh->ax25;
110 neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); 112 neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev);
113 if (ax25s)
114 ax25_cb_put(ax25s);
111 115
112 return (neigh->ax25 != NULL); 116 return (neigh->ax25 != NULL);
113} 117}
@@ -120,13 +124,17 @@ static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
120static int rose_link_up(struct rose_neigh *neigh) 124static int rose_link_up(struct rose_neigh *neigh)
121{ 125{
122 ax25_address *rose_call; 126 ax25_address *rose_call;
127 ax25_cb *ax25s;
123 128
124 if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) 129 if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
125 rose_call = (ax25_address *)neigh->dev->dev_addr; 130 rose_call = (ax25_address *)neigh->dev->dev_addr;
126 else 131 else
127 rose_call = &rose_callsign; 132 rose_call = &rose_callsign;
128 133
134 ax25s = neigh->ax25;
129 neigh->ax25 = ax25_find_cb(rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); 135 neigh->ax25 = ax25_find_cb(rose_call, &neigh->callsign, neigh->digipeat, neigh->dev);
136 if (ax25s)
137 ax25_cb_put(ax25s);
130 138
131 return (neigh->ax25 != NULL); 139 return (neigh->ax25 != NULL);
132} 140}
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
index 114df6eec8c3..968e8bac1b5d 100644
--- a/net/rose/rose_loopback.c
+++ b/net/rose/rose_loopback.c
@@ -75,7 +75,7 @@ static void rose_loopback_timer(unsigned long param)
75 lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); 75 lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
76 frametype = skb->data[2]; 76 frametype = skb->data[2];
77 dest = (rose_address *)(skb->data + 4); 77 dest = (rose_address *)(skb->data + 4);
78 lci_o = 0xFFF - lci_i; 78 lci_o = ROSE_DEFAULT_MAXVC + 1 - lci_i;
79 79
80 skb_reset_transport_header(skb); 80 skb_reset_transport_header(skb);
81 81
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 795c4b025e31..70a0b3b4b4d2 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -235,6 +235,8 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
235 235
236 if ((s = rose_neigh_list) == rose_neigh) { 236 if ((s = rose_neigh_list) == rose_neigh) {
237 rose_neigh_list = rose_neigh->next; 237 rose_neigh_list = rose_neigh->next;
238 if (rose_neigh->ax25)
239 ax25_cb_put(rose_neigh->ax25);
238 kfree(rose_neigh->digipeat); 240 kfree(rose_neigh->digipeat);
239 kfree(rose_neigh); 241 kfree(rose_neigh);
240 return; 242 return;
@@ -243,6 +245,8 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
243 while (s != NULL && s->next != NULL) { 245 while (s != NULL && s->next != NULL) {
244 if (s->next == rose_neigh) { 246 if (s->next == rose_neigh) {
245 s->next = rose_neigh->next; 247 s->next = rose_neigh->next;
248 if (rose_neigh->ax25)
249 ax25_cb_put(rose_neigh->ax25);
246 kfree(rose_neigh->digipeat); 250 kfree(rose_neigh->digipeat);
247 kfree(rose_neigh); 251 kfree(rose_neigh);
248 return; 252 return;
@@ -812,6 +816,7 @@ void rose_link_failed(ax25_cb *ax25, int reason)
812 816
813 if (rose_neigh != NULL) { 817 if (rose_neigh != NULL) {
814 rose_neigh->ax25 = NULL; 818 rose_neigh->ax25 = NULL;
819 ax25_cb_put(ax25);
815 820
816 rose_del_route_by_neigh(rose_neigh); 821 rose_del_route_by_neigh(rose_neigh);
817 rose_kill_by_neigh(rose_neigh); 822 rose_kill_by_neigh(rose_neigh);
diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c
index 77228f28fa36..2d744f22a9a1 100644
--- a/net/rxrpc/ar-accept.c
+++ b/net/rxrpc/ar-accept.c
@@ -88,6 +88,11 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
88 88
89 /* get a notification message to send to the server app */ 89 /* get a notification message to send to the server app */
90 notification = alloc_skb(0, GFP_NOFS); 90 notification = alloc_skb(0, GFP_NOFS);
91 if (!notification) {
92 _debug("no memory");
93 ret = -ENOMEM;
94 goto error_nofree;
95 }
91 rxrpc_new_skb(notification); 96 rxrpc_new_skb(notification);
92 notification->mark = RXRPC_SKB_MARK_NEW_CALL; 97 notification->mark = RXRPC_SKB_MARK_NEW_CALL;
93 98
@@ -189,6 +194,7 @@ invalid_service:
189 ret = -ECONNREFUSED; 194 ret = -ECONNREFUSED;
190error: 195error:
191 rxrpc_free_skb(notification); 196 rxrpc_free_skb(notification);
197error_nofree:
192 _leave(" = %d", ret); 198 _leave(" = %d", ret);
193 return ret; 199 return ret;
194} 200}
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 929218a47620..2f691fb180d1 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -328,13 +328,16 @@ config NET_CLS_FLOW
328 module will be called cls_flow. 328 module will be called cls_flow.
329 329
330config NET_CLS_CGROUP 330config NET_CLS_CGROUP
331 bool "Control Group Classifier" 331 tristate "Control Group Classifier"
332 select NET_CLS 332 select NET_CLS
333 depends on CGROUPS 333 depends on CGROUPS
334 ---help--- 334 ---help---
335 Say Y here if you want to classify packets based on the control 335 Say Y here if you want to classify packets based on the control
336 cgroup of their process. 336 cgroup of their process.
337 337
338 To compile this code as a module, choose M here: the
339 module will be called cls_cgroup.
340
338config NET_EMATCH 341config NET_EMATCH
339 bool "Extended Matches" 342 bool "Extended Matches"
340 select NET_CLS 343 select NET_CLS
@@ -433,7 +436,7 @@ config NET_ACT_POLICE
433 module. 436 module.
434 437
435 To compile this code as a module, choose M here: the 438 To compile this code as a module, choose M here: the
436 module will be called police. 439 module will be called act_police.
437 440
438config NET_ACT_GACT 441config NET_ACT_GACT
439 tristate "Generic actions" 442 tristate "Generic actions"
@@ -443,7 +446,7 @@ config NET_ACT_GACT
443 accepting packets. 446 accepting packets.
444 447
445 To compile this code as a module, choose M here: the 448 To compile this code as a module, choose M here: the
446 module will be called gact. 449 module will be called act_gact.
447 450
448config GACT_PROB 451config GACT_PROB
449 bool "Probability support" 452 bool "Probability support"
@@ -459,7 +462,7 @@ config NET_ACT_MIRRED
459 other devices. 462 other devices.
460 463
461 To compile this code as a module, choose M here: the 464 To compile this code as a module, choose M here: the
462 module will be called mirred. 465 module will be called act_mirred.
463 466
464config NET_ACT_IPT 467config NET_ACT_IPT
465 tristate "IPtables targets" 468 tristate "IPtables targets"
@@ -469,7 +472,7 @@ config NET_ACT_IPT
469 classification. 472 classification.
470 473
471 To compile this code as a module, choose M here: the 474 To compile this code as a module, choose M here: the
472 module will be called ipt. 475 module will be called act_ipt.
473 476
474config NET_ACT_NAT 477config NET_ACT_NAT
475 tristate "Stateless NAT" 478 tristate "Stateless NAT"
@@ -479,7 +482,7 @@ config NET_ACT_NAT
479 netfilter for NAT unless you know what you are doing. 482 netfilter for NAT unless you know what you are doing.
480 483
481 To compile this code as a module, choose M here: the 484 To compile this code as a module, choose M here: the
482 module will be called nat. 485 module will be called act_nat.
483 486
484config NET_ACT_PEDIT 487config NET_ACT_PEDIT
485 tristate "Packet Editing" 488 tristate "Packet Editing"
@@ -488,7 +491,7 @@ config NET_ACT_PEDIT
488 Say Y here if you want to mangle the content of packets. 491 Say Y here if you want to mangle the content of packets.
489 492
490 To compile this code as a module, choose M here: the 493 To compile this code as a module, choose M here: the
491 module will be called pedit. 494 module will be called act_pedit.
492 495
493config NET_ACT_SIMP 496config NET_ACT_SIMP
494 tristate "Simple Example (Debug)" 497 tristate "Simple Example (Debug)"
@@ -502,7 +505,7 @@ config NET_ACT_SIMP
502 If unsure, say N. 505 If unsure, say N.
503 506
504 To compile this code as a module, choose M here: the 507 To compile this code as a module, choose M here: the
505 module will be called simple. 508 module will be called act_simple.
506 509
507config NET_ACT_SKBEDIT 510config NET_ACT_SKBEDIT
508 tristate "SKB Editing" 511 tristate "SKB Editing"
@@ -513,7 +516,7 @@ config NET_ACT_SKBEDIT
513 If unsure, say N. 516 If unsure, say N.
514 517
515 To compile this code as a module, choose M here: the 518 To compile this code as a module, choose M here: the
516 module will be called skbedit. 519 module will be called act_skbedit.
517 520
518config NET_CLS_IND 521config NET_CLS_IND
519 bool "Incoming device classification" 522 bool "Incoming device classification"
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index e4877ca6727c..7f27d2c15e08 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -24,6 +24,25 @@ struct cgroup_cls_state
24 u32 classid; 24 u32 classid;
25}; 25};
26 26
27static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
28 struct cgroup *cgrp);
29static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
30static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);
31
32struct cgroup_subsys net_cls_subsys = {
33 .name = "net_cls",
34 .create = cgrp_create,
35 .destroy = cgrp_destroy,
36 .populate = cgrp_populate,
37#ifdef CONFIG_NET_CLS_CGROUP
38 .subsys_id = net_cls_subsys_id,
39#else
40#define net_cls_subsys_id net_cls_subsys.subsys_id
41#endif
42 .module = THIS_MODULE,
43};
44
45
27static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp) 46static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
28{ 47{
29 return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id), 48 return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
@@ -79,14 +98,6 @@ static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
79 return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files)); 98 return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
80} 99}
81 100
82struct cgroup_subsys net_cls_subsys = {
83 .name = "net_cls",
84 .create = cgrp_create,
85 .destroy = cgrp_destroy,
86 .populate = cgrp_populate,
87 .subsys_id = net_cls_subsys_id,
88};
89
90struct cls_cgroup_head 101struct cls_cgroup_head
91{ 102{
92 u32 handle; 103 u32 handle;
@@ -277,12 +288,19 @@ static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
277 288
278static int __init init_cgroup_cls(void) 289static int __init init_cgroup_cls(void)
279{ 290{
280 return register_tcf_proto_ops(&cls_cgroup_ops); 291 int ret = register_tcf_proto_ops(&cls_cgroup_ops);
292 if (ret)
293 return ret;
294 ret = cgroup_load_subsys(&net_cls_subsys);
295 if (ret)
296 unregister_tcf_proto_ops(&cls_cgroup_ops);
297 return ret;
281} 298}
282 299
283static void __exit exit_cgroup_cls(void) 300static void __exit exit_cgroup_cls(void)
284{ 301{
285 unregister_tcf_proto_ops(&cls_cgroup_ops); 302 unregister_tcf_proto_ops(&cls_cgroup_ops);
303 cgroup_unload_subsys(&net_cls_subsys);
286} 304}
287 305
288module_init(init_cgroup_cls); 306module_init(init_cgroup_cls);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 75fd1c672c61..6cd491013b50 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1707,6 +1707,7 @@ static int __init pktsched_init(void)
1707{ 1707{
1708 register_qdisc(&pfifo_qdisc_ops); 1708 register_qdisc(&pfifo_qdisc_ops);
1709 register_qdisc(&bfifo_qdisc_ops); 1709 register_qdisc(&bfifo_qdisc_ops);
1710 register_qdisc(&pfifo_head_drop_qdisc_ops);
1710 register_qdisc(&mq_qdisc_ops); 1711 register_qdisc(&mq_qdisc_ops);
1711 proc_net_fops_create(&init_net, "psched", 0, &psched_fops); 1712 proc_net_fops_create(&init_net, "psched", 0, &psched_fops);
1712 1713
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 69188e8358b4..4b0a6cc44c77 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -43,6 +43,26 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
43 return qdisc_reshape_fail(skb, sch); 43 return qdisc_reshape_fail(skb, sch);
44} 44}
45 45
46static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
47{
48 struct sk_buff *skb_head;
49 struct fifo_sched_data *q = qdisc_priv(sch);
50
51 if (likely(skb_queue_len(&sch->q) < q->limit))
52 return qdisc_enqueue_tail(skb, sch);
53
54 /* queue full, remove one skb to fulfill the limit */
55 skb_head = qdisc_dequeue_head(sch);
56 sch->bstats.bytes -= qdisc_pkt_len(skb_head);
57 sch->bstats.packets--;
58 sch->qstats.drops++;
59 kfree_skb(skb_head);
60
61 qdisc_enqueue_tail(skb, sch);
62
63 return NET_XMIT_CN;
64}
65
46static int fifo_init(struct Qdisc *sch, struct nlattr *opt) 66static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
47{ 67{
48 struct fifo_sched_data *q = qdisc_priv(sch); 68 struct fifo_sched_data *q = qdisc_priv(sch);
@@ -108,6 +128,20 @@ struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
108}; 128};
109EXPORT_SYMBOL(bfifo_qdisc_ops); 129EXPORT_SYMBOL(bfifo_qdisc_ops);
110 130
131struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
132 .id = "pfifo_head_drop",
133 .priv_size = sizeof(struct fifo_sched_data),
134 .enqueue = pfifo_tail_enqueue,
135 .dequeue = qdisc_dequeue_head,
136 .peek = qdisc_peek_head,
137 .drop = qdisc_queue_drop_head,
138 .init = fifo_init,
139 .reset = qdisc_reset_queue,
140 .change = fifo_init,
141 .dump = fifo_dump,
142 .owner = THIS_MODULE,
143};
144
111/* Pass size change message down to embedded FIFO */ 145/* Pass size change message down to embedded FIFO */
112int fifo_set_limit(struct Qdisc *q, unsigned int limit) 146int fifo_set_limit(struct Qdisc *q, unsigned int limit)
113{ 147{
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 13a6fba41077..bef133731683 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -186,7 +186,6 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
186 addr->valid = 1; 186 addr->valid = 1;
187 187
188 INIT_LIST_HEAD(&addr->list); 188 INIT_LIST_HEAD(&addr->list);
189 INIT_RCU_HEAD(&addr->rcu);
190 189
191 /* We always hold a socket lock when calling this function, 190 /* We always hold a socket lock when calling this function,
192 * and that acts as a writer synchronizing lock. 191 * and that acts as a writer synchronizing lock.
diff --git a/net/sctp/input.c b/net/sctp/input.c
index c0c973e67add..3d74b264ea22 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -75,7 +75,7 @@ static struct sctp_association *__sctp_lookup_association(
75 const union sctp_addr *peer, 75 const union sctp_addr *peer,
76 struct sctp_transport **pt); 76 struct sctp_transport **pt);
77 77
78static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb); 78static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
79 79
80 80
81/* Calculate the SCTP checksum of an SCTP packet. */ 81/* Calculate the SCTP checksum of an SCTP packet. */
@@ -265,8 +265,13 @@ int sctp_rcv(struct sk_buff *skb)
265 } 265 }
266 266
267 if (sock_owned_by_user(sk)) { 267 if (sock_owned_by_user(sk)) {
268 if (sctp_add_backlog(sk, skb)) {
269 sctp_bh_unlock_sock(sk);
270 sctp_chunk_free(chunk);
271 skb = NULL; /* sctp_chunk_free already freed the skb */
272 goto discard_release;
273 }
268 SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG); 274 SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
269 sctp_add_backlog(sk, skb);
270 } else { 275 } else {
271 SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ); 276 SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
272 sctp_inq_push(&chunk->rcvr->inqueue, chunk); 277 sctp_inq_push(&chunk->rcvr->inqueue, chunk);
@@ -336,8 +341,10 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
336 sctp_bh_lock_sock(sk); 341 sctp_bh_lock_sock(sk);
337 342
338 if (sock_owned_by_user(sk)) { 343 if (sock_owned_by_user(sk)) {
339 sk_add_backlog(sk, skb); 344 if (sk_add_backlog(sk, skb))
340 backloged = 1; 345 sctp_chunk_free(chunk);
346 else
347 backloged = 1;
341 } else 348 } else
342 sctp_inq_push(inqueue, chunk); 349 sctp_inq_push(inqueue, chunk);
343 350
@@ -362,22 +369,27 @@ done:
362 return 0; 369 return 0;
363} 370}
364 371
365static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb) 372static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
366{ 373{
367 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; 374 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
368 struct sctp_ep_common *rcvr = chunk->rcvr; 375 struct sctp_ep_common *rcvr = chunk->rcvr;
376 int ret;
369 377
370 /* Hold the assoc/ep while hanging on the backlog queue. 378 ret = sk_add_backlog(sk, skb);
371 * This way, we know structures we need will not disappear from us 379 if (!ret) {
372 */ 380 /* Hold the assoc/ep while hanging on the backlog queue.
373 if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) 381 * This way, we know structures we need will not disappear
374 sctp_association_hold(sctp_assoc(rcvr)); 382 * from us
375 else if (SCTP_EP_TYPE_SOCKET == rcvr->type) 383 */
376 sctp_endpoint_hold(sctp_ep(rcvr)); 384 if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
377 else 385 sctp_association_hold(sctp_assoc(rcvr));
378 BUG(); 386 else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
387 sctp_endpoint_hold(sctp_ep(rcvr));
388 else
389 BUG();
390 }
391 return ret;
379 392
380 sk_add_backlog(sk, skb);
381} 393}
382 394
383/* Handle icmp frag needed error. */ 395/* Handle icmp frag needed error. */
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index cc50fbe99291..1d7ac70ba39f 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -381,7 +381,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
381 addr->a.v6.sin6_scope_id = dev->ifindex; 381 addr->a.v6.sin6_scope_id = dev->ifindex;
382 addr->valid = 1; 382 addr->valid = 1;
383 INIT_LIST_HEAD(&addr->list); 383 INIT_LIST_HEAD(&addr->list);
384 INIT_RCU_HEAD(&addr->rcu);
385 list_add_tail(&addr->list, addrlist); 384 list_add_tail(&addr->list, addrlist);
386 } 385 }
387 } 386 }
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index d093cbfeaac4..784bcc9a979d 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -40,7 +40,7 @@
40#include <net/sctp/sctp.h> 40#include <net/sctp/sctp.h>
41#include <net/ip.h> /* for snmp_fold_field */ 41#include <net/ip.h> /* for snmp_fold_field */
42 42
43static struct snmp_mib sctp_snmp_list[] = { 43static const struct snmp_mib sctp_snmp_list[] = {
44 SNMP_MIB_ITEM("SctpCurrEstab", SCTP_MIB_CURRESTAB), 44 SNMP_MIB_ITEM("SctpCurrEstab", SCTP_MIB_CURRESTAB),
45 SNMP_MIB_ITEM("SctpActiveEstabs", SCTP_MIB_ACTIVEESTABS), 45 SNMP_MIB_ITEM("SctpActiveEstabs", SCTP_MIB_ACTIVEESTABS),
46 SNMP_MIB_ITEM("SctpPassiveEstabs", SCTP_MIB_PASSIVEESTABS), 46 SNMP_MIB_ITEM("SctpPassiveEstabs", SCTP_MIB_PASSIVEESTABS),
@@ -83,7 +83,7 @@ static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
83 83
84 for (i = 0; sctp_snmp_list[i].name != NULL; i++) 84 for (i = 0; sctp_snmp_list[i].name != NULL; i++)
85 seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name, 85 seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
86 snmp_fold_field((void **)sctp_statistics, 86 snmp_fold_field((void __percpu **)sctp_statistics,
87 sctp_snmp_list[i].entry)); 87 sctp_snmp_list[i].entry));
88 88
89 return 0; 89 return 0;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index a3c8988758b1..e771690f6d5d 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -188,7 +188,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
188 addr->a.v4.sin_addr.s_addr = ifa->ifa_local; 188 addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
189 addr->valid = 1; 189 addr->valid = 1;
190 INIT_LIST_HEAD(&addr->list); 190 INIT_LIST_HEAD(&addr->list);
191 INIT_RCU_HEAD(&addr->rcu);
192 list_add_tail(&addr->list, addrlist); 191 list_add_tail(&addr->list, addrlist);
193 } 192 }
194 } 193 }
@@ -996,12 +995,13 @@ int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
996 995
997static inline int init_sctp_mibs(void) 996static inline int init_sctp_mibs(void)
998{ 997{
999 return snmp_mib_init((void**)sctp_statistics, sizeof(struct sctp_mib)); 998 return snmp_mib_init((void __percpu **)sctp_statistics,
999 sizeof(struct sctp_mib));
1000} 1000}
1001 1001
1002static inline void cleanup_sctp_mibs(void) 1002static inline void cleanup_sctp_mibs(void)
1003{ 1003{
1004 snmp_mib_free((void**)sctp_statistics); 1004 snmp_mib_free((void __percpu **)sctp_statistics);
1005} 1005}
1006 1006
1007static void sctp_v4_pf_init(void) 1007static void sctp_v4_pf_init(void)
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 4e4ca65cd320..500886bda9b4 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -475,7 +475,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
475 * used to provide an upper bound to this doubling operation. 475 * used to provide an upper bound to this doubling operation.
476 * 476 *
477 * Special Case: the first HB doesn't trigger exponential backoff. 477 * Special Case: the first HB doesn't trigger exponential backoff.
478 * The first unacknowleged HB triggers it. We do this with a flag 478 * The first unacknowledged HB triggers it. We do this with a flag
479 * that indicates that we have an outstanding HB. 479 * that indicates that we have an outstanding HB.
480 */ 480 */
481 if (!is_hb || transport->hb_sent) { 481 if (!is_hb || transport->hb_sent) {
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 89ab66e54740..dfc5c127efd4 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2087,8 +2087,7 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
2087 if (copy_from_user(&sp->autoclose, optval, optlen)) 2087 if (copy_from_user(&sp->autoclose, optval, optlen))
2088 return -EFAULT; 2088 return -EFAULT;
2089 /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */ 2089 /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
2090 if (sp->autoclose > (MAX_SCHEDULE_TIMEOUT / HZ) ) 2090 sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
2091 sp->autoclose = (__u32)(MAX_SCHEDULE_TIMEOUT / HZ) ;
2092 2091
2093 return 0; 2092 return 0;
2094} 2093}
@@ -3721,6 +3720,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
3721 SCTP_DBG_OBJCNT_INC(sock); 3720 SCTP_DBG_OBJCNT_INC(sock);
3722 percpu_counter_inc(&sctp_sockets_allocated); 3721 percpu_counter_inc(&sctp_sockets_allocated);
3723 3722
3723 /* Set socket backlog limit. */
3724 sk->sk_backlog.limit = sysctl_sctp_rmem[1];
3725
3724 local_bh_disable(); 3726 local_bh_disable();
3725 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 3727 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
3726 local_bh_enable(); 3728 local_bh_enable();
@@ -6360,7 +6362,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
6360 struct sctp_association *asoc) 6362 struct sctp_association *asoc)
6361{ 6363{
6362 struct inet_sock *inet = inet_sk(sk); 6364 struct inet_sock *inet = inet_sk(sk);
6363 struct inet_sock *newinet = inet_sk(newsk); 6365 struct inet_sock *newinet;
6364 6366
6365 newsk->sk_type = sk->sk_type; 6367 newsk->sk_type = sk->sk_type;
6366 newsk->sk_bound_dev_if = sk->sk_bound_dev_if; 6368 newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
diff --git a/net/socket.c b/net/socket.c
index b94c3dd71015..f55ffe9f8c87 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -312,18 +312,6 @@ static struct file_system_type sock_fs_type = {
312 .kill_sb = kill_anon_super, 312 .kill_sb = kill_anon_super,
313}; 313};
314 314
315static int sockfs_delete_dentry(struct dentry *dentry)
316{
317 /*
318 * At creation time, we pretended this dentry was hashed
319 * (by clearing DCACHE_UNHASHED bit in d_flags)
320 * At delete time, we restore the truth : not hashed.
321 * (so that dput() can proceed correctly)
322 */
323 dentry->d_flags |= DCACHE_UNHASHED;
324 return 0;
325}
326
327/* 315/*
328 * sockfs_dname() is called from d_path(). 316 * sockfs_dname() is called from d_path().
329 */ 317 */
@@ -334,7 +322,6 @@ static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen)
334} 322}
335 323
336static const struct dentry_operations sockfs_dentry_operations = { 324static const struct dentry_operations sockfs_dentry_operations = {
337 .d_delete = sockfs_delete_dentry,
338 .d_dname = sockfs_dname, 325 .d_dname = sockfs_dname,
339}; 326};
340 327
@@ -355,68 +342,55 @@ static const struct dentry_operations sockfs_dentry_operations = {
355 * but we take care of internal coherence yet. 342 * but we take care of internal coherence yet.
356 */ 343 */
357 344
358static int sock_alloc_fd(struct file **filep, int flags) 345static int sock_alloc_file(struct socket *sock, struct file **f, int flags)
359{ 346{
347 struct qstr name = { .name = "" };
348 struct path path;
349 struct file *file;
360 int fd; 350 int fd;
361 351
362 fd = get_unused_fd_flags(flags); 352 fd = get_unused_fd_flags(flags);
363 if (likely(fd >= 0)) { 353 if (unlikely(fd < 0))
364 struct file *file = get_empty_filp(); 354 return fd;
365
366 *filep = file;
367 if (unlikely(!file)) {
368 put_unused_fd(fd);
369 return -ENFILE;
370 }
371 } else
372 *filep = NULL;
373 return fd;
374}
375 355
376static int sock_attach_fd(struct socket *sock, struct file *file, int flags) 356 path.dentry = d_alloc(sock_mnt->mnt_sb->s_root, &name);
377{ 357 if (unlikely(!path.dentry)) {
378 struct dentry *dentry; 358 put_unused_fd(fd);
379 struct qstr name = { .name = "" };
380
381 dentry = d_alloc(sock_mnt->mnt_sb->s_root, &name);
382 if (unlikely(!dentry))
383 return -ENOMEM; 359 return -ENOMEM;
360 }
361 path.mnt = mntget(sock_mnt);
384 362
385 dentry->d_op = &sockfs_dentry_operations; 363 path.dentry->d_op = &sockfs_dentry_operations;
386 /* 364 d_instantiate(path.dentry, SOCK_INODE(sock));
387 * We dont want to push this dentry into global dentry hash table. 365 SOCK_INODE(sock)->i_fop = &socket_file_ops;
388 * We pretend dentry is already hashed, by unsetting DCACHE_UNHASHED
389 * This permits a working /proc/$pid/fd/XXX on sockets
390 */
391 dentry->d_flags &= ~DCACHE_UNHASHED;
392 d_instantiate(dentry, SOCK_INODE(sock));
393 366
394 sock->file = file; 367 file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
395 init_file(file, sock_mnt, dentry, FMODE_READ | FMODE_WRITE,
396 &socket_file_ops); 368 &socket_file_ops);
397 SOCK_INODE(sock)->i_fop = &socket_file_ops; 369 if (unlikely(!file)) {
370 /* drop dentry, keep inode */
371 atomic_inc(&path.dentry->d_inode->i_count);
372 path_put(&path);
373 put_unused_fd(fd);
374 return -ENFILE;
375 }
376
377 sock->file = file;
398 file->f_flags = O_RDWR | (flags & O_NONBLOCK); 378 file->f_flags = O_RDWR | (flags & O_NONBLOCK);
399 file->f_pos = 0; 379 file->f_pos = 0;
400 file->private_data = sock; 380 file->private_data = sock;
401 381
402 return 0; 382 *f = file;
383 return fd;
403} 384}
404 385
405int sock_map_fd(struct socket *sock, int flags) 386int sock_map_fd(struct socket *sock, int flags)
406{ 387{
407 struct file *newfile; 388 struct file *newfile;
408 int fd = sock_alloc_fd(&newfile, flags); 389 int fd = sock_alloc_file(sock, &newfile, flags);
409
410 if (likely(fd >= 0)) {
411 int err = sock_attach_fd(sock, newfile, flags);
412 390
413 if (unlikely(err < 0)) { 391 if (likely(fd >= 0))
414 put_filp(newfile);
415 put_unused_fd(fd);
416 return err;
417 }
418 fd_install(fd, newfile); 392 fd_install(fd, newfile);
419 } 393
420 return fd; 394 return fd;
421} 395}
422 396
@@ -1390,29 +1364,19 @@ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
1390 if (err < 0) 1364 if (err < 0)
1391 goto out_release_both; 1365 goto out_release_both;
1392 1366
1393 fd1 = sock_alloc_fd(&newfile1, flags & O_CLOEXEC); 1367 fd1 = sock_alloc_file(sock1, &newfile1, flags);
1394 if (unlikely(fd1 < 0)) { 1368 if (unlikely(fd1 < 0)) {
1395 err = fd1; 1369 err = fd1;
1396 goto out_release_both; 1370 goto out_release_both;
1397 } 1371 }
1398 1372
1399 fd2 = sock_alloc_fd(&newfile2, flags & O_CLOEXEC); 1373 fd2 = sock_alloc_file(sock2, &newfile2, flags);
1400 if (unlikely(fd2 < 0)) { 1374 if (unlikely(fd2 < 0)) {
1401 err = fd2; 1375 err = fd2;
1402 put_filp(newfile1);
1403 put_unused_fd(fd1);
1404 goto out_release_both;
1405 }
1406
1407 err = sock_attach_fd(sock1, newfile1, flags & O_NONBLOCK);
1408 if (unlikely(err < 0)) {
1409 goto out_fd2;
1410 }
1411
1412 err = sock_attach_fd(sock2, newfile2, flags & O_NONBLOCK);
1413 if (unlikely(err < 0)) {
1414 fput(newfile1); 1376 fput(newfile1);
1415 goto out_fd1; 1377 put_unused_fd(fd1);
1378 sock_release(sock2);
1379 goto out;
1416 } 1380 }
1417 1381
1418 audit_fd_pair(fd1, fd2); 1382 audit_fd_pair(fd1, fd2);
@@ -1438,16 +1402,6 @@ out_release_1:
1438 sock_release(sock1); 1402 sock_release(sock1);
1439out: 1403out:
1440 return err; 1404 return err;
1441
1442out_fd2:
1443 put_filp(newfile1);
1444 sock_release(sock1);
1445out_fd1:
1446 put_filp(newfile2);
1447 sock_release(sock2);
1448 put_unused_fd(fd1);
1449 put_unused_fd(fd2);
1450 goto out;
1451} 1405}
1452 1406
1453/* 1407/*
@@ -1551,17 +1505,13 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
1551 */ 1505 */
1552 __module_get(newsock->ops->owner); 1506 __module_get(newsock->ops->owner);
1553 1507
1554 newfd = sock_alloc_fd(&newfile, flags & O_CLOEXEC); 1508 newfd = sock_alloc_file(newsock, &newfile, flags);
1555 if (unlikely(newfd < 0)) { 1509 if (unlikely(newfd < 0)) {
1556 err = newfd; 1510 err = newfd;
1557 sock_release(newsock); 1511 sock_release(newsock);
1558 goto out_put; 1512 goto out_put;
1559 } 1513 }
1560 1514
1561 err = sock_attach_fd(newsock, newfile, flags & O_NONBLOCK);
1562 if (err < 0)
1563 goto out_fd_simple;
1564
1565 err = security_socket_accept(sock, newsock); 1515 err = security_socket_accept(sock, newsock);
1566 if (err) 1516 if (err)
1567 goto out_fd; 1517 goto out_fd;
@@ -1591,11 +1541,6 @@ out_put:
1591 fput_light(sock->file, fput_needed); 1541 fput_light(sock->file, fput_needed);
1592out: 1542out:
1593 return err; 1543 return err;
1594out_fd_simple:
1595 sock_release(newsock);
1596 put_filp(newfile);
1597 put_unused_fd(newfd);
1598 goto out_put;
1599out_fd: 1544out_fd:
1600 fput(newfile); 1545 fput(newfile);
1601 put_unused_fd(newfd); 1546 put_unused_fd(newfd);
@@ -2190,6 +2135,10 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
2190 break; 2135 break;
2191 ++datagrams; 2136 ++datagrams;
2192 2137
2138 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2139 if (flags & MSG_WAITFORONE)
2140 flags |= MSG_DONTWAIT;
2141
2193 if (timeout) { 2142 if (timeout) {
2194 ktime_get_ts(timeout); 2143 ktime_get_ts(timeout);
2195 *timeout = timespec_sub(end_time, *timeout); 2144 *timeout = timespec_sub(end_time, *timeout);
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index 6dcdd2517819..f845d9d72f73 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -71,8 +71,9 @@ static size_t rpc_ntop6(const struct sockaddr *sap,
71 if (unlikely(len == 0)) 71 if (unlikely(len == 0))
72 return len; 72 return len;
73 73
74 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) && 74 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
75 !(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_SITELOCAL)) 75 return len;
76 if (sin6->sin6_scope_id == 0)
76 return len; 77 return len;
77 78
78 rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u", 79 rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u",
@@ -165,8 +166,7 @@ static int rpc_parse_scope_id(const char *buf, const size_t buflen,
165 if (*delim != IPV6_SCOPE_DELIMITER) 166 if (*delim != IPV6_SCOPE_DELIMITER)
166 return 0; 167 return 0;
167 168
168 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) && 169 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
169 !(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_SITELOCAL))
170 return 0; 170 return 0;
171 171
172 len = (buf + buflen) - delim - 1; 172 len = (buf + buflen) - delim - 1;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 3c3c50f38a1c..c389ccf6437d 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -206,8 +206,14 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct
206 ctx->gc_win = window_size; 206 ctx->gc_win = window_size;
207 /* gssd signals an error by passing ctx->gc_win = 0: */ 207 /* gssd signals an error by passing ctx->gc_win = 0: */
208 if (ctx->gc_win == 0) { 208 if (ctx->gc_win == 0) {
209 /* in which case, p points to an error code which we ignore */ 209 /*
210 p = ERR_PTR(-EACCES); 210 * in which case, p points to an error code. Anything other
211 * than -EKEYEXPIRED gets converted to -EACCES.
212 */
213 p = simple_get_bytes(p, end, &ret, sizeof(ret));
214 if (!IS_ERR(p))
215 p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
216 ERR_PTR(-EACCES);
211 goto err; 217 goto err;
212 } 218 }
213 /* copy the opaque wire context */ 219 /* copy the opaque wire context */
@@ -644,7 +650,23 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
644 p = gss_fill_context(p, end, ctx, gss_msg->auth->mech); 650 p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
645 if (IS_ERR(p)) { 651 if (IS_ERR(p)) {
646 err = PTR_ERR(p); 652 err = PTR_ERR(p);
647 gss_msg->msg.errno = (err == -EAGAIN) ? -EAGAIN : -EACCES; 653 switch (err) {
654 case -EACCES:
655 case -EKEYEXPIRED:
656 gss_msg->msg.errno = err;
657 err = mlen;
658 break;
659 case -EFAULT:
660 case -ENOMEM:
661 case -EINVAL:
662 case -ENOSYS:
663 gss_msg->msg.errno = -EAGAIN;
664 break;
665 default:
666 printk(KERN_CRIT "%s: bad return from "
667 "gss_fill_context: %zd\n", __func__, err);
668 BUG();
669 }
648 goto err_release_msg; 670 goto err_release_msg;
649 } 671 }
650 gss_msg->ctx = gss_get_ctx(ctx); 672 gss_msg->ctx = gss_get_ctx(ctx);
@@ -1258,9 +1280,8 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
1258 rqstp->rq_release_snd_buf = priv_release_snd_buf; 1280 rqstp->rq_release_snd_buf = priv_release_snd_buf;
1259 return 0; 1281 return 0;
1260out_free: 1282out_free:
1261 for (i--; i >= 0; i--) { 1283 rqstp->rq_enc_pages_num = i;
1262 __free_page(rqstp->rq_enc_pages[i]); 1284 priv_release_snd_buf(rqstp);
1263 }
1264out: 1285out:
1265 return -EAGAIN; 1286 return -EAGAIN;
1266} 1287}
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index ef45eba22485..2deb0ed72ff4 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -131,8 +131,10 @@ gss_import_sec_context_kerberos(const void *p,
131 struct krb5_ctx *ctx; 131 struct krb5_ctx *ctx;
132 int tmp; 132 int tmp;
133 133
134 if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) 134 if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) {
135 p = ERR_PTR(-ENOMEM);
135 goto out_err; 136 goto out_err;
137 }
136 138
137 p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); 139 p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
138 if (IS_ERR(p)) 140 if (IS_ERR(p))
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 6efbb0cd3c7c..76e4c6f4ac3c 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -252,7 +252,7 @@ gss_import_sec_context(const void *input_token, size_t bufsize,
252 struct gss_ctx **ctx_id) 252 struct gss_ctx **ctx_id)
253{ 253{
254 if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL))) 254 if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL)))
255 return GSS_S_FAILURE; 255 return -ENOMEM;
256 (*ctx_id)->mech_type = gss_mech_get(mech); 256 (*ctx_id)->mech_type = gss_mech_get(mech);
257 257
258 return mech->gm_ops 258 return mech->gm_ops
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
index 13f214f53120..f0c05d3311c1 100644
--- a/net/sunrpc/bc_svc.c
+++ b/net/sunrpc/bc_svc.c
@@ -37,21 +37,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 37
38#define RPCDBG_FACILITY RPCDBG_SVCDSP 38#define RPCDBG_FACILITY RPCDBG_SVCDSP
39 39
40void bc_release_request(struct rpc_task *task)
41{
42 struct rpc_rqst *req = task->tk_rqstp;
43
44 dprintk("RPC: bc_release_request: task= %p\n", task);
45
46 /*
47 * Release this request only if it's a backchannel
48 * preallocated request
49 */
50 if (!bc_prealloc(req))
51 return;
52 xprt_free_bc_request(req);
53}
54
55/* Empty callback ops */ 40/* Empty callback ops */
56static const struct rpc_call_ops nfs41_callback_ops = { 41static const struct rpc_call_ops nfs41_callback_ops = {
57}; 42};
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 154034b675bd..19c9983d5360 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -659,6 +659,7 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
659 task = rpc_new_task(&task_setup_data); 659 task = rpc_new_task(&task_setup_data);
660 if (!task) { 660 if (!task) {
661 xprt_free_bc_request(req); 661 xprt_free_bc_request(req);
662 task = ERR_PTR(-ENOMEM);
662 goto out; 663 goto out;
663 } 664 }
664 task->tk_rqstp = req; 665 task->tk_rqstp = req;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 49278f830367..20e30c6f8355 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -78,7 +78,7 @@ rpc_timeout_upcall_queue(struct work_struct *work)
78} 78}
79 79
80/** 80/**
81 * rpc_queue_upcall 81 * rpc_queue_upcall - queue an upcall message to userspace
82 * @inode: inode of upcall pipe on which to queue given message 82 * @inode: inode of upcall pipe on which to queue given message
83 * @msg: message to queue 83 * @msg: message to queue
84 * 84 *
@@ -587,6 +587,8 @@ static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent,
587 struct dentry *dentry; 587 struct dentry *dentry;
588 588
589 dentry = __rpc_lookup_create(parent, name); 589 dentry = __rpc_lookup_create(parent, name);
590 if (IS_ERR(dentry))
591 return dentry;
590 if (dentry->d_inode == NULL) 592 if (dentry->d_inode == NULL)
591 return dentry; 593 return dentry;
592 dput(dentry); 594 dput(dentry);
@@ -999,19 +1001,14 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
999 inode = rpc_get_inode(sb, S_IFDIR | 0755); 1001 inode = rpc_get_inode(sb, S_IFDIR | 0755);
1000 if (!inode) 1002 if (!inode)
1001 return -ENOMEM; 1003 return -ENOMEM;
1002 root = d_alloc_root(inode); 1004 sb->s_root = root = d_alloc_root(inode);
1003 if (!root) { 1005 if (!root) {
1004 iput(inode); 1006 iput(inode);
1005 return -ENOMEM; 1007 return -ENOMEM;
1006 } 1008 }
1007 if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL)) 1009 if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL))
1008 goto out; 1010 return -ENOMEM;
1009 sb->s_root = root;
1010 return 0; 1011 return 0;
1011out:
1012 d_genocide(root);
1013 dput(root);
1014 return -ENOMEM;
1015} 1012}
1016 1013
1017static int 1014static int
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index cef74ba0666c..aae6907fd546 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -210,6 +210,7 @@ void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qnam
210{ 210{
211 __rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY); 211 __rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
212} 212}
213EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
213 214
214void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname) 215void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
215{ 216{
@@ -385,6 +386,20 @@ static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct r
385} 386}
386 387
387/* 388/*
389 * Tests whether rpc queue is empty
390 */
391int rpc_queue_empty(struct rpc_wait_queue *queue)
392{
393 int res;
394
395 spin_lock_bh(&queue->lock);
396 res = queue->qlen;
397 spin_unlock_bh(&queue->lock);
398 return (res == 0);
399}
400EXPORT_SYMBOL_GPL(rpc_queue_empty);
401
402/*
388 * Wake up a task on a specific queue 403 * Wake up a task on a specific queue
389 */ 404 */
390void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task) 405void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 538ca433a56c..8420a4205b76 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -133,7 +133,7 @@ svc_pool_map_choose_mode(void)
133 return SVC_POOL_PERNODE; 133 return SVC_POOL_PERNODE;
134 } 134 }
135 135
136 node = any_online_node(node_online_map); 136 node = first_online_node;
137 if (nr_cpus_node(node) > 2) { 137 if (nr_cpus_node(node) > 2) {
138 /* 138 /*
139 * Non-trivial SMP, or CONFIG_NUMA on 139 * Non-trivial SMP, or CONFIG_NUMA on
@@ -506,6 +506,10 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
506{ 506{
507 unsigned int pages, arghi; 507 unsigned int pages, arghi;
508 508
509 /* bc_xprt uses fore channel allocated buffers */
510 if (svc_is_backchannel(rqstp))
511 return 1;
512
509 pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply. 513 pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
510 * We assume one is at most one page 514 * We assume one is at most one page
511 */ 515 */
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index b845e2293dfe..8f0f1fb3dc52 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -16,8 +16,6 @@
16 16
17#define RPCDBG_FACILITY RPCDBG_SVCXPRT 17#define RPCDBG_FACILITY RPCDBG_SVCXPRT
18 18
19#define SVC_MAX_WAKING 5
20
21static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt); 19static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
22static int svc_deferred_recv(struct svc_rqst *rqstp); 20static int svc_deferred_recv(struct svc_rqst *rqstp);
23static struct cache_deferred_req *svc_defer(struct cache_req *req); 21static struct cache_deferred_req *svc_defer(struct cache_req *req);
@@ -175,11 +173,13 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
175 .sin_addr.s_addr = htonl(INADDR_ANY), 173 .sin_addr.s_addr = htonl(INADDR_ANY),
176 .sin_port = htons(port), 174 .sin_port = htons(port),
177 }; 175 };
176#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
178 struct sockaddr_in6 sin6 = { 177 struct sockaddr_in6 sin6 = {
179 .sin6_family = AF_INET6, 178 .sin6_family = AF_INET6,
180 .sin6_addr = IN6ADDR_ANY_INIT, 179 .sin6_addr = IN6ADDR_ANY_INIT,
181 .sin6_port = htons(port), 180 .sin6_port = htons(port),
182 }; 181 };
182#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
183 struct sockaddr *sap; 183 struct sockaddr *sap;
184 size_t len; 184 size_t len;
185 185
@@ -188,10 +188,12 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
188 sap = (struct sockaddr *)&sin; 188 sap = (struct sockaddr *)&sin;
189 len = sizeof(sin); 189 len = sizeof(sin);
190 break; 190 break;
191#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
191 case PF_INET6: 192 case PF_INET6:
192 sap = (struct sockaddr *)&sin6; 193 sap = (struct sockaddr *)&sin6;
193 len = sizeof(sin6); 194 len = sizeof(sin6);
194 break; 195 break;
196#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
195 default: 197 default:
196 return ERR_PTR(-EAFNOSUPPORT); 198 return ERR_PTR(-EAFNOSUPPORT);
197 } 199 }
@@ -233,7 +235,10 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
233 err: 235 err:
234 spin_unlock(&svc_xprt_class_lock); 236 spin_unlock(&svc_xprt_class_lock);
235 dprintk("svc: transport %s not found\n", xprt_name); 237 dprintk("svc: transport %s not found\n", xprt_name);
236 return -ENOENT; 238
239 /* This errno is exposed to user space. Provide a reasonable
240 * perror msg for a bad transport. */
241 return -EPROTONOSUPPORT;
237} 242}
238EXPORT_SYMBOL_GPL(svc_create_xprt); 243EXPORT_SYMBOL_GPL(svc_create_xprt);
239 244
@@ -306,7 +311,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
306 struct svc_pool *pool; 311 struct svc_pool *pool;
307 struct svc_rqst *rqstp; 312 struct svc_rqst *rqstp;
308 int cpu; 313 int cpu;
309 int thread_avail;
310 314
311 if (!(xprt->xpt_flags & 315 if (!(xprt->xpt_flags &
312 ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED)))) 316 ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
@@ -318,6 +322,12 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
318 322
319 spin_lock_bh(&pool->sp_lock); 323 spin_lock_bh(&pool->sp_lock);
320 324
325 if (!list_empty(&pool->sp_threads) &&
326 !list_empty(&pool->sp_sockets))
327 printk(KERN_ERR
328 "svc_xprt_enqueue: "
329 "threads and transports both waiting??\n");
330
321 if (test_bit(XPT_DEAD, &xprt->xpt_flags)) { 331 if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
322 /* Don't enqueue dead transports */ 332 /* Don't enqueue dead transports */
323 dprintk("svc: transport %p is dead, not enqueued\n", xprt); 333 dprintk("svc: transport %p is dead, not enqueued\n", xprt);
@@ -358,15 +368,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
358 } 368 }
359 369
360 process: 370 process:
361 /* Work out whether threads are available */ 371 if (!list_empty(&pool->sp_threads)) {
362 thread_avail = !list_empty(&pool->sp_threads); /* threads are asleep */
363 if (pool->sp_nwaking >= SVC_MAX_WAKING) {
364 /* too many threads are runnable and trying to wake up */
365 thread_avail = 0;
366 pool->sp_stats.overloads_avoided++;
367 }
368
369 if (thread_avail) {
370 rqstp = list_entry(pool->sp_threads.next, 372 rqstp = list_entry(pool->sp_threads.next,
371 struct svc_rqst, 373 struct svc_rqst,
372 rq_list); 374 rq_list);
@@ -381,8 +383,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
381 svc_xprt_get(xprt); 383 svc_xprt_get(xprt);
382 rqstp->rq_reserved = serv->sv_max_mesg; 384 rqstp->rq_reserved = serv->sv_max_mesg;
383 atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); 385 atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
384 rqstp->rq_waking = 1;
385 pool->sp_nwaking++;
386 pool->sp_stats.threads_woken++; 386 pool->sp_stats.threads_woken++;
387 BUG_ON(xprt->xpt_pool != pool); 387 BUG_ON(xprt->xpt_pool != pool);
388 wake_up(&rqstp->rq_wait); 388 wake_up(&rqstp->rq_wait);
@@ -651,11 +651,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
651 return -EINTR; 651 return -EINTR;
652 652
653 spin_lock_bh(&pool->sp_lock); 653 spin_lock_bh(&pool->sp_lock);
654 if (rqstp->rq_waking) {
655 rqstp->rq_waking = 0;
656 pool->sp_nwaking--;
657 BUG_ON(pool->sp_nwaking < 0);
658 }
659 xprt = svc_xprt_dequeue(pool); 654 xprt = svc_xprt_dequeue(pool);
660 if (xprt) { 655 if (xprt) {
661 rqstp->rq_xprt = xprt; 656 rqstp->rq_xprt = xprt;
@@ -711,7 +706,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
711 spin_unlock_bh(&pool->sp_lock); 706 spin_unlock_bh(&pool->sp_lock);
712 707
713 len = 0; 708 len = 0;
714 if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { 709 if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
710 dprintk("svc_recv: found XPT_CLOSE\n");
711 svc_delete_xprt(xprt);
712 } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
715 struct svc_xprt *newxpt; 713 struct svc_xprt *newxpt;
716 newxpt = xprt->xpt_ops->xpo_accept(xprt); 714 newxpt = xprt->xpt_ops->xpo_accept(xprt);
717 if (newxpt) { 715 if (newxpt) {
@@ -737,7 +735,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
737 svc_xprt_received(newxpt); 735 svc_xprt_received(newxpt);
738 } 736 }
739 svc_xprt_received(xprt); 737 svc_xprt_received(xprt);
740 } else if (!test_bit(XPT_CLOSE, &xprt->xpt_flags)) { 738 } else {
741 dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", 739 dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
742 rqstp, pool->sp_id, xprt, 740 rqstp, pool->sp_id, xprt,
743 atomic_read(&xprt->xpt_ref.refcount)); 741 atomic_read(&xprt->xpt_ref.refcount));
@@ -750,11 +748,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
750 dprintk("svc: got len=%d\n", len); 748 dprintk("svc: got len=%d\n", len);
751 } 749 }
752 750
753 if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
754 dprintk("svc_recv: found XPT_CLOSE\n");
755 svc_delete_xprt(xprt);
756 }
757
758 /* No data, incomplete (TCP) read, or accept() */ 751 /* No data, incomplete (TCP) read, or accept() */
759 if (len == 0 || len == -EAGAIN) { 752 if (len == 0 || len == -EAGAIN) {
760 rqstp->rq_res.len = 0; 753 rqstp->rq_res.len = 0;
@@ -900,11 +893,8 @@ void svc_delete_xprt(struct svc_xprt *xprt)
900 if (test_bit(XPT_TEMP, &xprt->xpt_flags)) 893 if (test_bit(XPT_TEMP, &xprt->xpt_flags))
901 serv->sv_tmpcnt--; 894 serv->sv_tmpcnt--;
902 895
903 for (dr = svc_deferred_dequeue(xprt); dr; 896 while ((dr = svc_deferred_dequeue(xprt)) != NULL)
904 dr = svc_deferred_dequeue(xprt)) {
905 svc_xprt_put(xprt);
906 kfree(dr); 897 kfree(dr);
907 }
908 898
909 svc_xprt_put(xprt); 899 svc_xprt_put(xprt);
910 spin_unlock_bh(&serv->sv_lock); 900 spin_unlock_bh(&serv->sv_lock);
@@ -1204,16 +1194,15 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)
1204 struct svc_pool *pool = p; 1194 struct svc_pool *pool = p;
1205 1195
1206 if (p == SEQ_START_TOKEN) { 1196 if (p == SEQ_START_TOKEN) {
1207 seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken overloads-avoided threads-timedout\n"); 1197 seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
1208 return 0; 1198 return 0;
1209 } 1199 }
1210 1200
1211 seq_printf(m, "%u %lu %lu %lu %lu %lu\n", 1201 seq_printf(m, "%u %lu %lu %lu %lu\n",
1212 pool->sp_id, 1202 pool->sp_id,
1213 pool->sp_stats.packets, 1203 pool->sp_stats.packets,
1214 pool->sp_stats.sockets_queued, 1204 pool->sp_stats.sockets_queued,
1215 pool->sp_stats.threads_woken, 1205 pool->sp_stats.threads_woken,
1216 pool->sp_stats.overloads_avoided,
1217 pool->sp_stats.threads_timedout); 1206 pool->sp_stats.threads_timedout);
1218 1207
1219 return 0; 1208 return 0;
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 4a8f6558718a..afdcb0459a83 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -15,6 +15,7 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#define RPCDBG_FACILITY RPCDBG_AUTH 16#define RPCDBG_FACILITY RPCDBG_AUTH
17 17
18#include <linux/sunrpc/clnt.h>
18 19
19/* 20/*
20 * AUTHUNIX and AUTHNULL credentials are both handled here. 21 * AUTHUNIX and AUTHNULL credentials are both handled here.
@@ -187,10 +188,13 @@ static int ip_map_parse(struct cache_detail *cd,
187 * for scratch: */ 188 * for scratch: */
188 char *buf = mesg; 189 char *buf = mesg;
189 int len; 190 int len;
190 int b1, b2, b3, b4, b5, b6, b7, b8;
191 char c;
192 char class[8]; 191 char class[8];
193 struct in6_addr addr; 192 union {
193 struct sockaddr sa;
194 struct sockaddr_in s4;
195 struct sockaddr_in6 s6;
196 } address;
197 struct sockaddr_in6 sin6;
194 int err; 198 int err;
195 199
196 struct ip_map *ipmp; 200 struct ip_map *ipmp;
@@ -209,24 +213,24 @@ static int ip_map_parse(struct cache_detail *cd,
209 len = qword_get(&mesg, buf, mlen); 213 len = qword_get(&mesg, buf, mlen);
210 if (len <= 0) return -EINVAL; 214 if (len <= 0) return -EINVAL;
211 215
212 if (sscanf(buf, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) == 4) { 216 if (rpc_pton(buf, len, &address.sa, sizeof(address)) == 0)
213 addr.s6_addr32[0] = 0; 217 return -EINVAL;
214 addr.s6_addr32[1] = 0; 218 switch (address.sa.sa_family) {
215 addr.s6_addr32[2] = htonl(0xffff); 219 case AF_INET:
216 addr.s6_addr32[3] = 220 /* Form a mapped IPv4 address in sin6 */
217 htonl((((((b1<<8)|b2)<<8)|b3)<<8)|b4); 221 memset(&sin6, 0, sizeof(sin6));
218 } else if (sscanf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x%c", 222 sin6.sin6_family = AF_INET6;
219 &b1, &b2, &b3, &b4, &b5, &b6, &b7, &b8, &c) == 8) { 223 sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
220 addr.s6_addr16[0] = htons(b1); 224 sin6.sin6_addr.s6_addr32[3] = address.s4.sin_addr.s_addr;
221 addr.s6_addr16[1] = htons(b2); 225 break;
222 addr.s6_addr16[2] = htons(b3); 226#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
223 addr.s6_addr16[3] = htons(b4); 227 case AF_INET6:
224 addr.s6_addr16[4] = htons(b5); 228 memcpy(&sin6, &address.s6, sizeof(sin6));
225 addr.s6_addr16[5] = htons(b6); 229 break;
226 addr.s6_addr16[6] = htons(b7); 230#endif
227 addr.s6_addr16[7] = htons(b8); 231 default:
228 } else
229 return -EINVAL; 232 return -EINVAL;
233 }
230 234
231 expiry = get_expiry(&mesg); 235 expiry = get_expiry(&mesg);
232 if (expiry ==0) 236 if (expiry ==0)
@@ -243,7 +247,8 @@ static int ip_map_parse(struct cache_detail *cd,
243 } else 247 } else
244 dom = NULL; 248 dom = NULL;
245 249
246 ipmp = ip_map_lookup(class, &addr); 250 /* IPv6 scope IDs are ignored for now */
251 ipmp = ip_map_lookup(class, &sin6.sin6_addr);
247 if (ipmp) { 252 if (ipmp) {
248 err = ip_map_update(ipmp, 253 err = ip_map_update(ipmp,
249 container_of(dom, struct unix_domain, h), 254 container_of(dom, struct unix_domain, h),
@@ -619,7 +624,7 @@ static int unix_gid_show(struct seq_file *m,
619 else 624 else
620 glen = 0; 625 glen = 0;
621 626
622 seq_printf(m, "%d %d:", ug->uid, glen); 627 seq_printf(m, "%u %d:", ug->uid, glen);
623 for (i = 0; i < glen; i++) 628 for (i = 0; i < glen; i++)
624 seq_printf(m, " %d", GROUP_AT(ug->gi, i)); 629 seq_printf(m, " %d", GROUP_AT(ug->gi, i));
625 seq_printf(m, "\n"); 630 seq_printf(m, "\n");
@@ -655,23 +660,25 @@ static struct unix_gid *unix_gid_lookup(uid_t uid)
655 return NULL; 660 return NULL;
656} 661}
657 662
658static int unix_gid_find(uid_t uid, struct group_info **gip, 663static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp)
659 struct svc_rqst *rqstp)
660{ 664{
661 struct unix_gid *ug = unix_gid_lookup(uid); 665 struct unix_gid *ug;
666 struct group_info *gi;
667 int ret;
668
669 ug = unix_gid_lookup(uid);
662 if (!ug) 670 if (!ug)
663 return -EAGAIN; 671 return ERR_PTR(-EAGAIN);
664 switch (cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle)) { 672 ret = cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle);
673 switch (ret) {
665 case -ENOENT: 674 case -ENOENT:
666 *gip = NULL; 675 return ERR_PTR(-ENOENT);
667 return 0;
668 case 0: 676 case 0:
669 *gip = ug->gi; 677 gi = get_group_info(ug->gi);
670 get_group_info(*gip);
671 cache_put(&ug->h, &unix_gid_cache); 678 cache_put(&ug->h, &unix_gid_cache);
672 return 0; 679 return gi;
673 default: 680 default:
674 return -EAGAIN; 681 return ERR_PTR(-EAGAIN);
675 } 682 }
676} 683}
677 684
@@ -681,6 +688,8 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
681 struct sockaddr_in *sin; 688 struct sockaddr_in *sin;
682 struct sockaddr_in6 *sin6, sin6_storage; 689 struct sockaddr_in6 *sin6, sin6_storage;
683 struct ip_map *ipm; 690 struct ip_map *ipm;
691 struct group_info *gi;
692 struct svc_cred *cred = &rqstp->rq_cred;
684 693
685 switch (rqstp->rq_addr.ss_family) { 694 switch (rqstp->rq_addr.ss_family) {
686 case AF_INET: 695 case AF_INET:
@@ -721,6 +730,17 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
721 ip_map_cached_put(rqstp, ipm); 730 ip_map_cached_put(rqstp, ipm);
722 break; 731 break;
723 } 732 }
733
734 gi = unix_gid_find(cred->cr_uid, rqstp);
735 switch (PTR_ERR(gi)) {
736 case -EAGAIN:
737 return SVC_DROP;
738 case -ENOENT:
739 break;
740 default:
741 put_group_info(cred->cr_group_info);
742 cred->cr_group_info = gi;
743 }
724 return SVC_OK; 744 return SVC_OK;
725} 745}
726 746
@@ -817,19 +837,11 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
817 slen = svc_getnl(argv); /* gids length */ 837 slen = svc_getnl(argv); /* gids length */
818 if (slen > 16 || (len -= (slen + 2)*4) < 0) 838 if (slen > 16 || (len -= (slen + 2)*4) < 0)
819 goto badcred; 839 goto badcred;
820 if (unix_gid_find(cred->cr_uid, &cred->cr_group_info, rqstp) 840 cred->cr_group_info = groups_alloc(slen);
821 == -EAGAIN) 841 if (cred->cr_group_info == NULL)
822 return SVC_DROP; 842 return SVC_DROP;
823 if (cred->cr_group_info == NULL) { 843 for (i = 0; i < slen; i++)
824 cred->cr_group_info = groups_alloc(slen); 844 GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
825 if (cred->cr_group_info == NULL)
826 return SVC_DROP;
827 for (i = 0; i < slen; i++)
828 GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
829 } else {
830 for (i = 0; i < slen ; i++)
831 svc_getnl(argv);
832 }
833 if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { 845 if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
834 *authp = rpc_autherr_badverf; 846 *authp = rpc_autherr_badverf;
835 return SVC_DENIED; 847 return SVC_DENIED;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 870929e08e5d..a29f259204e6 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -968,6 +968,7 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
968 return len; 968 return len;
969 err_delete: 969 err_delete:
970 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); 970 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
971 svc_xprt_received(&svsk->sk_xprt);
971 err_again: 972 err_again:
972 return -EAGAIN; 973 return -EAGAIN;
973} 974}
@@ -1357,7 +1358,7 @@ int svc_addsock(struct svc_serv *serv, const int fd, char *name_return,
1357 1358
1358 if (!so) 1359 if (!so)
1359 return err; 1360 return err;
1360 if (so->sk->sk_family != AF_INET) 1361 if ((so->sk->sk_family != PF_INET) && (so->sk->sk_family != PF_INET6))
1361 err = -EAFNOSUPPORT; 1362 err = -EAFNOSUPPORT;
1362 else if (so->sk->sk_protocol != IPPROTO_TCP && 1363 else if (so->sk->sk_protocol != IPPROTO_TCP &&
1363 so->sk->sk_protocol != IPPROTO_UDP) 1364 so->sk->sk_protocol != IPPROTO_UDP)
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 469de292c23c..42f09ade0044 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -46,6 +46,7 @@
46 46
47#include <linux/sunrpc/clnt.h> 47#include <linux/sunrpc/clnt.h>
48#include <linux/sunrpc/metrics.h> 48#include <linux/sunrpc/metrics.h>
49#include <linux/sunrpc/bc_xprt.h>
49 50
50#include "sunrpc.h" 51#include "sunrpc.h"
51 52
@@ -1032,21 +1033,16 @@ void xprt_release(struct rpc_task *task)
1032 if (req->rq_release_snd_buf) 1033 if (req->rq_release_snd_buf)
1033 req->rq_release_snd_buf(req); 1034 req->rq_release_snd_buf(req);
1034 1035
1035 /*
1036 * Early exit if this is a backchannel preallocated request.
1037 * There is no need to have it added to the RPC slot list.
1038 */
1039 if (is_bc_request)
1040 return;
1041
1042 memset(req, 0, sizeof(*req)); /* mark unused */
1043
1044 dprintk("RPC: %5u release request %p\n", task->tk_pid, req); 1036 dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
1037 if (likely(!is_bc_request)) {
1038 memset(req, 0, sizeof(*req)); /* mark unused */
1045 1039
1046 spin_lock(&xprt->reserve_lock); 1040 spin_lock(&xprt->reserve_lock);
1047 list_add(&req->rq_list, &xprt->free); 1041 list_add(&req->rq_list, &xprt->free);
1048 rpc_wake_up_next(&xprt->backlog); 1042 rpc_wake_up_next(&xprt->backlog);
1049 spin_unlock(&xprt->reserve_lock); 1043 spin_unlock(&xprt->reserve_lock);
1044 } else
1045 xprt_free_bc_request(req);
1050} 1046}
1051 1047
1052/** 1048/**
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 7018eef1dcdd..f96c2fe6137b 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -160,16 +160,15 @@ xprt_rdma_format_addresses(struct rpc_xprt *xprt)
160 (void)rpc_ntop(sap, buf, sizeof(buf)); 160 (void)rpc_ntop(sap, buf, sizeof(buf));
161 xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL); 161 xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);
162 162
163 (void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap)); 163 snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
164 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); 164 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
165 165
166 xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma"; 166 xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";
167 167
168 (void)snprintf(buf, sizeof(buf), "%02x%02x%02x%02x", 168 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
169 NIPQUAD(sin->sin_addr.s_addr));
170 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); 169 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
171 170
172 (void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap)); 171 snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
173 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); 172 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
174 173
175 /* netid */ 174 /* netid */
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 3d739e5d15d8..9847c30b5001 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -297,12 +297,11 @@ static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
297 switch (sap->sa_family) { 297 switch (sap->sa_family) {
298 case AF_INET: 298 case AF_INET:
299 sin = xs_addr_in(xprt); 299 sin = xs_addr_in(xprt);
300 (void)snprintf(buf, sizeof(buf), "%02x%02x%02x%02x", 300 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
301 NIPQUAD(sin->sin_addr.s_addr));
302 break; 301 break;
303 case AF_INET6: 302 case AF_INET6:
304 sin6 = xs_addr_in6(xprt); 303 sin6 = xs_addr_in6(xprt);
305 (void)snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr); 304 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
306 break; 305 break;
307 default: 306 default:
308 BUG(); 307 BUG();
@@ -315,10 +314,10 @@ static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
315 struct sockaddr *sap = xs_addr(xprt); 314 struct sockaddr *sap = xs_addr(xprt);
316 char buf[128]; 315 char buf[128];
317 316
318 (void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap)); 317 snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
319 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL); 318 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
320 319
321 (void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap)); 320 snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
322 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL); 321 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
323} 322}
324 323
@@ -549,8 +548,6 @@ static int xs_udp_send_request(struct rpc_task *task)
549 /* Still some bytes left; set up for a retry later. */ 548 /* Still some bytes left; set up for a retry later. */
550 status = -EAGAIN; 549 status = -EAGAIN;
551 } 550 }
552 if (!transport->sock)
553 goto out;
554 551
555 switch (status) { 552 switch (status) {
556 case -ENOTSOCK: 553 case -ENOTSOCK:
@@ -570,7 +567,7 @@ static int xs_udp_send_request(struct rpc_task *task)
570 * prompts ECONNREFUSED. */ 567 * prompts ECONNREFUSED. */
571 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 568 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
572 } 569 }
573out: 570
574 return status; 571 return status;
575} 572}
576 573
@@ -652,8 +649,6 @@ static int xs_tcp_send_request(struct rpc_task *task)
652 status = -EAGAIN; 649 status = -EAGAIN;
653 break; 650 break;
654 } 651 }
655 if (!transport->sock)
656 goto out;
657 652
658 switch (status) { 653 switch (status) {
659 case -ENOTSOCK: 654 case -ENOTSOCK:
@@ -673,7 +668,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
673 case -ENOTCONN: 668 case -ENOTCONN:
674 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 669 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
675 } 670 }
676out: 671
677 return status; 672 return status;
678} 673}
679 674
@@ -1912,6 +1907,11 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
1912 case -EALREADY: 1907 case -EALREADY:
1913 xprt_clear_connecting(xprt); 1908 xprt_clear_connecting(xprt);
1914 return; 1909 return;
1910 case -EINVAL:
1911 /* Happens, for instance, if the user specified a link
1912 * local IPv6 address without a scope-id.
1913 */
1914 goto out;
1915 } 1915 }
1916out_eagain: 1916out_eagain:
1917 status = -EAGAIN; 1917 status = -EAGAIN;
@@ -2100,7 +2100,7 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2100 * we allocate pages instead doing a kmalloc like rpc_malloc is because we want 2100 * we allocate pages instead doing a kmalloc like rpc_malloc is because we want
2101 * to use the server side send routines. 2101 * to use the server side send routines.
2102 */ 2102 */
2103void *bc_malloc(struct rpc_task *task, size_t size) 2103static void *bc_malloc(struct rpc_task *task, size_t size)
2104{ 2104{
2105 struct page *page; 2105 struct page *page;
2106 struct rpc_buffer *buf; 2106 struct rpc_buffer *buf;
@@ -2120,7 +2120,7 @@ void *bc_malloc(struct rpc_task *task, size_t size)
2120/* 2120/*
2121 * Free the space allocated in the bc_alloc routine 2121 * Free the space allocated in the bc_alloc routine
2122 */ 2122 */
2123void bc_free(void *buffer) 2123static void bc_free(void *buffer)
2124{ 2124{
2125 struct rpc_buffer *buf; 2125 struct rpc_buffer *buf;
2126 2126
@@ -2251,9 +2251,6 @@ static struct rpc_xprt_ops xs_tcp_ops = {
2251 .buf_free = rpc_free, 2251 .buf_free = rpc_free,
2252 .send_request = xs_tcp_send_request, 2252 .send_request = xs_tcp_send_request,
2253 .set_retrans_timeout = xprt_set_retrans_timeout_def, 2253 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2254#if defined(CONFIG_NFS_V4_1)
2255 .release_request = bc_release_request,
2256#endif /* CONFIG_NFS_V4_1 */
2257 .close = xs_tcp_close, 2254 .close = xs_tcp_close,
2258 .destroy = xs_destroy, 2255 .destroy = xs_destroy,
2259 .print_stats = xs_tcp_print_stats, 2256 .print_stats = xs_tcp_print_stats,
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 0b15d7250c40..53196009160a 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -71,7 +71,7 @@ static struct ctl_table_root net_sysctl_ro_root = {
71 .permissions = net_ctl_ro_header_perms, 71 .permissions = net_ctl_ro_header_perms,
72}; 72};
73 73
74static int sysctl_net_init(struct net *net) 74static int __net_init sysctl_net_init(struct net *net)
75{ 75{
76 setup_sysctl_set(&net->sysctls, 76 setup_sysctl_set(&net->sysctls,
77 &net_sysctl_ro_root.default_set, 77 &net_sysctl_ro_root.default_set,
@@ -79,7 +79,7 @@ static int sysctl_net_init(struct net *net)
79 return 0; 79 return 0;
80} 80}
81 81
82static void sysctl_net_exit(struct net *net) 82static void __net_exit sysctl_net_exit(struct net *net)
83{ 83{
84 WARN_ON(!list_empty(&net->sysctls.list)); 84 WARN_ON(!list_empty(&net->sysctls.list));
85 return; 85 return;
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index 3b30d1130b61..b74f78d0c033 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -10,7 +10,7 @@ menuconfig TIPC
10 specially designed for intra cluster communication. This protocol 10 specially designed for intra cluster communication. This protocol
11 originates from Ericsson where it has been used in carrier grade 11 originates from Ericsson where it has been used in carrier grade
12 cluster applications for many years. 12 cluster applications for many years.
13 13
14 For more information about TIPC, see http://tipc.sourceforge.net. 14 For more information about TIPC, see http://tipc.sourceforge.net.
15 15
16 This protocol support is also available as a module ( = code which 16 This protocol support is also available as a module ( = code which
@@ -23,91 +23,76 @@ menuconfig TIPC
23if TIPC 23if TIPC
24 24
25config TIPC_ADVANCED 25config TIPC_ADVANCED
26 bool "TIPC: Advanced configuration" 26 bool "Advanced TIPC configuration"
27 default n 27 default n
28 help 28 help
29 Saying Y here will open some advanced configuration 29 Saying Y here will open some advanced configuration for TIPC.
30 for TIPC. Most users do not need to bother, so if 30 Most users do not need to bother; if unsure, just say N.
31 unsure, just say N.
32 31
33config TIPC_ZONES 32config TIPC_ZONES
34 int "Maximum number of zones in network" 33 int "Maximum number of zones in a network"
35 depends on TIPC_ADVANCED 34 depends on TIPC_ADVANCED
35 range 1 255
36 default "3" 36 default "3"
37 help 37 help
38 Max number of zones inside TIPC network. Max supported value 38 Specifies how many zones can be supported in a TIPC network.
39 is 255 zones, minimum is 1 39 Can range from 1 to 255 zones; default is 3.
40 40
41 Default is 3 zones in a network; setting this to higher 41 Setting this to a smaller value saves some memory;
42 allows more zones but might use more memory. 42 setting it to a higher value allows for more zones.
43 43
44config TIPC_CLUSTERS 44config TIPC_CLUSTERS
45 int "Maximum number of clusters in a zone" 45 int "Maximum number of clusters in a zone"
46 depends on TIPC_ADVANCED 46 depends on TIPC_ADVANCED
47 range 1 1
47 default "1" 48 default "1"
48 help 49 help
49 ***Only 1 (one cluster in a zone) is supported by current code. 50 Specifies how many clusters can be supported in a TIPC zone.
50 Any value set here will be overridden.***
51
52 (Max number of clusters inside TIPC zone. Max supported
53 value is 4095 clusters, minimum is 1.
54 51
55 Default is 1; setting this to smaller value might save 52 *** Currently TIPC only supports a single cluster per zone. ***
56 some memory, setting it to higher
57 allows more clusters and might consume more memory.)
58 53
59config TIPC_NODES 54config TIPC_NODES
60 int "Maximum number of nodes in cluster" 55 int "Maximum number of nodes in a cluster"
61 depends on TIPC_ADVANCED 56 depends on TIPC_ADVANCED
57 range 8 2047
62 default "255" 58 default "255"
63 help 59 help
64 Maximum number of nodes inside a TIPC cluster. Maximum 60 Specifies how many nodes can be supported in a TIPC cluster.
65 supported value is 2047 nodes, minimum is 8. 61 Can range from 8 to 2047 nodes; default is 255.
66
67 Setting this to a smaller value saves some memory,
68 setting it to higher allows more nodes.
69
70config TIPC_SLAVE_NODES
71 int "Maximum number of slave nodes in cluster"
72 depends on TIPC_ADVANCED
73 default "0"
74 help
75 ***This capability is not supported by current code.***
76
77 Maximum number of slave nodes inside a TIPC cluster. Maximum
78 supported value is 2047 nodes, minimum is 0.
79 62
80 Setting this to a smaller value saves some memory, 63 Setting this to a smaller value saves some memory;
81 setting it to higher allows more nodes. 64 setting it to higher allows for more nodes.
82 65
83config TIPC_PORTS 66config TIPC_PORTS
84 int "Maximum number of ports in a node" 67 int "Maximum number of ports in a node"
85 depends on TIPC_ADVANCED 68 depends on TIPC_ADVANCED
69 range 127 65535
86 default "8191" 70 default "8191"
87 help 71 help
88 Maximum number of ports within a node. Maximum 72 Specifies how many ports can be supported by a node.
89 supported value is 64535 nodes, minimum is 127. 73 Can range from 127 to 65535 ports; default is 8191.
90 74
91 Setting this to a smaller value saves some memory, 75 Setting this to a smaller value saves some memory,
92 setting it to higher allows more ports. 76 setting it to higher allows for more ports.
93 77
94config TIPC_LOG 78config TIPC_LOG
95 int "Size of log buffer" 79 int "Size of log buffer"
96 depends on TIPC_ADVANCED 80 depends on TIPC_ADVANCED
97 default 0 81 range 0 32768
82 default "0"
98 help 83 help
99 Size (in bytes) of TIPC's internal log buffer, which records the 84 Size (in bytes) of TIPC's internal log buffer, which records the
100 occurrence of significant events. Maximum supported value 85 occurrence of significant events. Can range from 0 to 32768 bytes;
101 is 32768 bytes, minimum is 0. 86 default is 0.
102 87
103 There is no need to enable the log buffer unless the node will be 88 There is no need to enable the log buffer unless the node will be
104 managed remotely via TIPC. 89 managed remotely via TIPC.
105 90
106config TIPC_DEBUG 91config TIPC_DEBUG
107 bool "Enable debugging support" 92 bool "Enable debug messages"
108 default n 93 default n
109 help 94 help
110 This will enable debugging of TIPC. 95 This enables debugging of TIPC.
111 96
112 Only say Y here if you are having trouble with TIPC. It will 97 Only say Y here if you are having trouble with TIPC. It will
113 enable the display of detailed information about what is going on. 98 enable the display of detailed information about what is going on.
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 327011fcc407..78091375ca12 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -45,10 +45,10 @@
45 45
46#define MAX_ADDR_STR 32 46#define MAX_ADDR_STR 32
47 47
48static struct media *media_list = NULL; 48static struct media media_list[MAX_MEDIA];
49static u32 media_count = 0; 49static u32 media_count = 0;
50 50
51struct bearer *tipc_bearers = NULL; 51struct bearer tipc_bearers[MAX_BEARERS];
52 52
53/** 53/**
54 * media_name_valid - validate media name 54 * media_name_valid - validate media name
@@ -108,9 +108,11 @@ int tipc_register_media(u32 media_type,
108 int res = -EINVAL; 108 int res = -EINVAL;
109 109
110 write_lock_bh(&tipc_net_lock); 110 write_lock_bh(&tipc_net_lock);
111 if (!media_list)
112 goto exit;
113 111
112 if (tipc_mode != TIPC_NET_MODE) {
113 warn("Media <%s> rejected, not in networked mode yet\n", name);
114 goto exit;
115 }
114 if (!media_name_valid(name)) { 116 if (!media_name_valid(name)) {
115 warn("Media <%s> rejected, illegal name\n", name); 117 warn("Media <%s> rejected, illegal name\n", name);
116 goto exit; 118 goto exit;
@@ -660,33 +662,10 @@ int tipc_disable_bearer(const char *name)
660 662
661 663
662 664
663int tipc_bearer_init(void)
664{
665 int res;
666
667 write_lock_bh(&tipc_net_lock);
668 tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC);
669 media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC);
670 if (tipc_bearers && media_list) {
671 res = 0;
672 } else {
673 kfree(tipc_bearers);
674 kfree(media_list);
675 tipc_bearers = NULL;
676 media_list = NULL;
677 res = -ENOMEM;
678 }
679 write_unlock_bh(&tipc_net_lock);
680 return res;
681}
682
683void tipc_bearer_stop(void) 665void tipc_bearer_stop(void)
684{ 666{
685 u32 i; 667 u32 i;
686 668
687 if (!tipc_bearers)
688 return;
689
690 for (i = 0; i < MAX_BEARERS; i++) { 669 for (i = 0; i < MAX_BEARERS; i++) {
691 if (tipc_bearers[i].active) 670 if (tipc_bearers[i].active)
692 tipc_bearers[i].publ.blocked = 1; 671 tipc_bearers[i].publ.blocked = 1;
@@ -695,10 +674,6 @@ void tipc_bearer_stop(void)
695 if (tipc_bearers[i].active) 674 if (tipc_bearers[i].active)
696 bearer_disable(tipc_bearers[i].publ.name); 675 bearer_disable(tipc_bearers[i].publ.name);
697 } 676 }
698 kfree(tipc_bearers);
699 kfree(media_list);
700 tipc_bearers = NULL;
701 media_list = NULL;
702 media_count = 0; 677 media_count = 0;
703} 678}
704 679
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index ca5734892713..000228e93f9e 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -114,7 +114,7 @@ struct bearer_name {
114 114
115struct link; 115struct link;
116 116
117extern struct bearer *tipc_bearers; 117extern struct bearer tipc_bearers[];
118 118
119void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a); 119void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
120struct sk_buff *tipc_media_get_names(void); 120struct sk_buff *tipc_media_get_names(void);
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 3256bd7d398f..52c571fedbe0 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -189,11 +189,11 @@ static int __init tipc_init(void)
189 tipc_remote_management = 1; 189 tipc_remote_management = 1;
190 tipc_max_publications = 10000; 190 tipc_max_publications = 10000;
191 tipc_max_subscriptions = 2000; 191 tipc_max_subscriptions = 2000;
192 tipc_max_ports = delimit(CONFIG_TIPC_PORTS, 127, 65536); 192 tipc_max_ports = CONFIG_TIPC_PORTS;
193 tipc_max_zones = delimit(CONFIG_TIPC_ZONES, 1, 255); 193 tipc_max_zones = CONFIG_TIPC_ZONES;
194 tipc_max_clusters = delimit(CONFIG_TIPC_CLUSTERS, 1, 1); 194 tipc_max_clusters = CONFIG_TIPC_CLUSTERS;
195 tipc_max_nodes = delimit(CONFIG_TIPC_NODES, 8, 2047); 195 tipc_max_nodes = CONFIG_TIPC_NODES;
196 tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047); 196 tipc_max_slaves = CONFIG_TIPC_SLAVE_NODES;
197 tipc_net_id = 4711; 197 tipc_net_id = 4711;
198 198
199 if ((res = tipc_core_start())) 199 if ((res = tipc_core_start()))
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 6f50f6423f63..1a7e4665af80 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1882,6 +1882,15 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1882 (msg_destnode(msg) != tipc_own_addr))) 1882 (msg_destnode(msg) != tipc_own_addr)))
1883 goto cont; 1883 goto cont;
1884 1884
1885 /* Discard non-routeable messages destined for another node */
1886
1887 if (unlikely(!msg_isdata(msg) &&
1888 (msg_destnode(msg) != tipc_own_addr))) {
1889 if ((msg_user(msg) != CONN_MANAGER) &&
1890 (msg_user(msg) != MSG_FRAGMENTER))
1891 goto cont;
1892 }
1893
1885 /* Locate unicast link endpoint that should handle message */ 1894 /* Locate unicast link endpoint that should handle message */
1886 1895
1887 n_ptr = tipc_node_find(msg_prevnode(msg)); 1896 n_ptr = tipc_node_find(msg_prevnode(msg));
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 7906608bf510..f25b1cdb64eb 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -116,7 +116,8 @@
116*/ 116*/
117 117
118DEFINE_RWLOCK(tipc_net_lock); 118DEFINE_RWLOCK(tipc_net_lock);
119struct network tipc_net = { NULL }; 119struct _zone *tipc_zones[256] = { NULL, };
120struct network tipc_net = { tipc_zones };
120 121
121struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref) 122struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref)
122{ 123{
@@ -158,28 +159,12 @@ void tipc_net_send_external_routes(u32 dest)
158 } 159 }
159} 160}
160 161
161static int net_init(void)
162{
163 memset(&tipc_net, 0, sizeof(tipc_net));
164 tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC);
165 if (!tipc_net.zones) {
166 return -ENOMEM;
167 }
168 return 0;
169}
170
171static void net_stop(void) 162static void net_stop(void)
172{ 163{
173 u32 z_num; 164 u32 z_num;
174 165
175 if (!tipc_net.zones) 166 for (z_num = 1; z_num <= tipc_max_zones; z_num++)
176 return;
177
178 for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
179 tipc_zone_delete(tipc_net.zones[z_num]); 167 tipc_zone_delete(tipc_net.zones[z_num]);
180 }
181 kfree(tipc_net.zones);
182 tipc_net.zones = NULL;
183} 168}
184 169
185static void net_route_named_msg(struct sk_buff *buf) 170static void net_route_named_msg(struct sk_buff *buf)
@@ -282,9 +267,7 @@ int tipc_net_start(u32 addr)
282 tipc_named_reinit(); 267 tipc_named_reinit();
283 tipc_port_reinit(); 268 tipc_port_reinit();
284 269
285 if ((res = tipc_bearer_init()) || 270 if ((res = tipc_cltr_init()) ||
286 (res = net_init()) ||
287 (res = tipc_cltr_init()) ||
288 (res = tipc_bclink_init())) { 271 (res = tipc_bclink_init())) {
289 return res; 272 return res;
290 } 273 }
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 414fc34b8bea..8dea66500cf5 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -153,11 +153,11 @@ void tipc_ref_table_stop(void)
153 153
154u32 tipc_ref_acquire(void *object, spinlock_t **lock) 154u32 tipc_ref_acquire(void *object, spinlock_t **lock)
155{ 155{
156 struct reference *entry;
157 u32 index; 156 u32 index;
158 u32 index_mask; 157 u32 index_mask;
159 u32 next_plus_upper; 158 u32 next_plus_upper;
160 u32 ref; 159 u32 ref;
160 struct reference *entry = NULL;
161 161
162 if (!object) { 162 if (!object) {
163 err("Attempt to acquire reference to non-existent object\n"); 163 err("Attempt to acquire reference to non-existent object\n");
@@ -175,30 +175,36 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
175 index = tipc_ref_table.first_free; 175 index = tipc_ref_table.first_free;
176 entry = &(tipc_ref_table.entries[index]); 176 entry = &(tipc_ref_table.entries[index]);
177 index_mask = tipc_ref_table.index_mask; 177 index_mask = tipc_ref_table.index_mask;
178 /* take lock in case a previous user of entry still holds it */
179 spin_lock_bh(&entry->lock);
180 next_plus_upper = entry->ref; 178 next_plus_upper = entry->ref;
181 tipc_ref_table.first_free = next_plus_upper & index_mask; 179 tipc_ref_table.first_free = next_plus_upper & index_mask;
182 ref = (next_plus_upper & ~index_mask) + index; 180 ref = (next_plus_upper & ~index_mask) + index;
183 entry->ref = ref;
184 entry->object = object;
185 *lock = &entry->lock;
186 } 181 }
187 else if (tipc_ref_table.init_point < tipc_ref_table.capacity) { 182 else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
188 index = tipc_ref_table.init_point++; 183 index = tipc_ref_table.init_point++;
189 entry = &(tipc_ref_table.entries[index]); 184 entry = &(tipc_ref_table.entries[index]);
190 spin_lock_init(&entry->lock); 185 spin_lock_init(&entry->lock);
191 spin_lock_bh(&entry->lock);
192 ref = tipc_ref_table.start_mask + index; 186 ref = tipc_ref_table.start_mask + index;
193 entry->ref = ref;
194 entry->object = object;
195 *lock = &entry->lock;
196 } 187 }
197 else { 188 else {
198 ref = 0; 189 ref = 0;
199 } 190 }
200 write_unlock_bh(&ref_table_lock); 191 write_unlock_bh(&ref_table_lock);
201 192
193 /*
194 * Grab the lock so no one else can modify this entry
195 * While we assign its ref value & object pointer
196 */
197 if (entry) {
198 spin_lock_bh(&entry->lock);
199 entry->ref = ref;
200 entry->object = object;
201 *lock = &entry->lock;
202 /*
203 * keep it locked, the caller is responsible
204 * for unlocking this when they're done with it
205 */
206 }
207
202 return ref; 208 return ref;
203} 209}
204 210
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 1ea64f09cc45..4b235fc1c70f 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1322,8 +1322,10 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1322 if (!sock_owned_by_user(sk)) { 1322 if (!sock_owned_by_user(sk)) {
1323 res = filter_rcv(sk, buf); 1323 res = filter_rcv(sk, buf);
1324 } else { 1324 } else {
1325 sk_add_backlog(sk, buf); 1325 if (sk_add_backlog(sk, buf))
1326 res = TIPC_OK; 1326 res = TIPC_ERR_OVERLOAD;
1327 else
1328 res = TIPC_OK;
1327 } 1329 }
1328 bh_unlock_sock(sk); 1330 bh_unlock_sock(sk);
1329 1331
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index ac91f0dfa144..ff123e56114a 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -76,19 +76,6 @@ struct top_srv {
76static struct top_srv topsrv = { 0 }; 76static struct top_srv topsrv = { 0 };
77 77
78/** 78/**
79 * htohl - convert value to endianness used by destination
80 * @in: value to convert
81 * @swap: non-zero if endianness must be reversed
82 *
83 * Returns converted value
84 */
85
86static u32 htohl(u32 in, int swap)
87{
88 return swap ? swab32(in) : in;
89}
90
91/**
92 * subscr_send_event - send a message containing a tipc_event to the subscriber 79 * subscr_send_event - send a message containing a tipc_event to the subscriber
93 * 80 *
94 * Note: Must not hold subscriber's server port lock, since tipc_send() will 81 * Note: Must not hold subscriber's server port lock, since tipc_send() will
@@ -107,11 +94,11 @@ static void subscr_send_event(struct subscription *sub,
107 msg_sect.iov_base = (void *)&sub->evt; 94 msg_sect.iov_base = (void *)&sub->evt;
108 msg_sect.iov_len = sizeof(struct tipc_event); 95 msg_sect.iov_len = sizeof(struct tipc_event);
109 96
110 sub->evt.event = htohl(event, sub->swap); 97 sub->evt.event = htonl(event);
111 sub->evt.found_lower = htohl(found_lower, sub->swap); 98 sub->evt.found_lower = htonl(found_lower);
112 sub->evt.found_upper = htohl(found_upper, sub->swap); 99 sub->evt.found_upper = htonl(found_upper);
113 sub->evt.port.ref = htohl(port_ref, sub->swap); 100 sub->evt.port.ref = htonl(port_ref);
114 sub->evt.port.node = htohl(node, sub->swap); 101 sub->evt.port.node = htonl(node);
115 tipc_send(sub->server_ref, 1, &msg_sect); 102 tipc_send(sub->server_ref, 1, &msg_sect);
116} 103}
117 104
@@ -287,16 +274,23 @@ static void subscr_cancel(struct tipc_subscr *s,
287{ 274{
288 struct subscription *sub; 275 struct subscription *sub;
289 struct subscription *sub_temp; 276 struct subscription *sub_temp;
277 __u32 type, lower, upper;
290 int found = 0; 278 int found = 0;
291 279
292 /* Find first matching subscription, exit if not found */ 280 /* Find first matching subscription, exit if not found */
293 281
282 type = ntohl(s->seq.type);
283 lower = ntohl(s->seq.lower);
284 upper = ntohl(s->seq.upper);
285
294 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, 286 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
295 subscription_list) { 287 subscription_list) {
296 if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) { 288 if ((type == sub->seq.type) &&
297 found = 1; 289 (lower == sub->seq.lower) &&
298 break; 290 (upper == sub->seq.upper)) {
299 } 291 found = 1;
292 break;
293 }
300 } 294 }
301 if (!found) 295 if (!found)
302 return; 296 return;
@@ -325,16 +319,10 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
325 struct subscriber *subscriber) 319 struct subscriber *subscriber)
326{ 320{
327 struct subscription *sub; 321 struct subscription *sub;
328 int swap;
329
330 /* Determine subscriber's endianness */
331
332 swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE));
333 322
334 /* Detect & process a subscription cancellation request */ 323 /* Detect & process a subscription cancellation request */
335 324
336 if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) { 325 if (ntohl(s->filter) & TIPC_SUB_CANCEL) {
337 s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
338 subscr_cancel(s, subscriber); 326 subscr_cancel(s, subscriber);
339 return NULL; 327 return NULL;
340 } 328 }
@@ -359,11 +347,11 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
359 347
360 /* Initialize subscription object */ 348 /* Initialize subscription object */
361 349
362 sub->seq.type = htohl(s->seq.type, swap); 350 sub->seq.type = ntohl(s->seq.type);
363 sub->seq.lower = htohl(s->seq.lower, swap); 351 sub->seq.lower = ntohl(s->seq.lower);
364 sub->seq.upper = htohl(s->seq.upper, swap); 352 sub->seq.upper = ntohl(s->seq.upper);
365 sub->timeout = htohl(s->timeout, swap); 353 sub->timeout = ntohl(s->timeout);
366 sub->filter = htohl(s->filter, swap); 354 sub->filter = ntohl(s->filter);
367 if ((!(sub->filter & TIPC_SUB_PORTS) == 355 if ((!(sub->filter & TIPC_SUB_PORTS) ==
368 !(sub->filter & TIPC_SUB_SERVICE)) || 356 !(sub->filter & TIPC_SUB_SERVICE)) ||
369 (sub->seq.lower > sub->seq.upper)) { 357 (sub->seq.lower > sub->seq.upper)) {
@@ -376,7 +364,6 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
376 INIT_LIST_HEAD(&sub->nameseq_list); 364 INIT_LIST_HEAD(&sub->nameseq_list);
377 list_add(&sub->subscription_list, &subscriber->subscription_list); 365 list_add(&sub->subscription_list, &subscriber->subscription_list);
378 sub->server_ref = subscriber->port_ref; 366 sub->server_ref = subscriber->port_ref;
379 sub->swap = swap;
380 memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr)); 367 memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
381 atomic_inc(&topsrv.subscription_count); 368 atomic_inc(&topsrv.subscription_count);
382 if (sub->timeout != TIPC_WAIT_FOREVER) { 369 if (sub->timeout != TIPC_WAIT_FOREVER) {
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index 45d89bf4d202..c20f496d95b2 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -53,7 +53,6 @@ typedef void (*tipc_subscr_event) (struct subscription *sub,
53 * @nameseq_list: adjacent subscriptions in name sequence's subscription list 53 * @nameseq_list: adjacent subscriptions in name sequence's subscription list
54 * @subscription_list: adjacent subscriptions in subscriber's subscription list 54 * @subscription_list: adjacent subscriptions in subscriber's subscription list
55 * @server_ref: object reference of server port associated with subscription 55 * @server_ref: object reference of server port associated with subscription
56 * @swap: indicates if subscriber uses opposite endianness in its messages
57 * @evt: template for events generated by subscription 56 * @evt: template for events generated by subscription
58 */ 57 */
59 58
@@ -66,7 +65,6 @@ struct subscription {
66 struct list_head nameseq_list; 65 struct list_head nameseq_list;
67 struct list_head subscription_list; 66 struct list_head subscription_list;
68 u32 server_ref; 67 u32 server_ref;
69 int swap;
70 struct tipc_event evt; 68 struct tipc_event evt;
71}; 69};
72 70
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index f25511903115..3d9122e78f41 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -144,7 +144,7 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
144/* 144/*
145 * SMP locking strategy: 145 * SMP locking strategy:
146 * hash table is protected with spinlock unix_table_lock 146 * hash table is protected with spinlock unix_table_lock
147 * each socket state is protected by separate rwlock. 147 * each socket state is protected by separate spin lock.
148 */ 148 */
149 149
150static inline unsigned unix_hash_fold(__wsum n) 150static inline unsigned unix_hash_fold(__wsum n)
@@ -2224,7 +2224,7 @@ static const struct net_proto_family unix_family_ops = {
2224}; 2224};
2225 2225
2226 2226
2227static int unix_net_init(struct net *net) 2227static int __net_init unix_net_init(struct net *net)
2228{ 2228{
2229 int error = -ENOMEM; 2229 int error = -ENOMEM;
2230 2230
@@ -2243,7 +2243,7 @@ out:
2243 return error; 2243 return error;
2244} 2244}
2245 2245
2246static void unix_net_exit(struct net *net) 2246static void __net_exit unix_net_exit(struct net *net)
2247{ 2247{
2248 unix_sysctl_unregister(net); 2248 unix_sysctl_unregister(net);
2249 proc_net_remove(net, "unix"); 2249 proc_net_remove(net, "unix");
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index 708f5df6b7f0..d095c7be10d0 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -31,7 +31,7 @@ static struct ctl_path unix_path[] = {
31 { }, 31 { },
32}; 32};
33 33
34int unix_sysctl_register(struct net *net) 34int __net_init unix_sysctl_register(struct net *net)
35{ 35{
36 struct ctl_table *table; 36 struct ctl_table *table;
37 37
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
index d3bfb6ef13ae..7718657e93dc 100644
--- a/net/wimax/op-msg.c
+++ b/net/wimax/op-msg.c
@@ -320,8 +320,7 @@ int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name,
320EXPORT_SYMBOL_GPL(wimax_msg); 320EXPORT_SYMBOL_GPL(wimax_msg);
321 321
322 322
323static const 323static const struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = {
324struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = {
325 [WIMAX_GNL_MSG_IFIDX] = { 324 [WIMAX_GNL_MSG_IFIDX] = {
326 .type = NLA_U32, 325 .type = NLA_U32,
327 }, 326 },
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c
index 35f370091f4f..4dc82a54ba30 100644
--- a/net/wimax/op-reset.c
+++ b/net/wimax/op-reset.c
@@ -91,8 +91,7 @@ int wimax_reset(struct wimax_dev *wimax_dev)
91EXPORT_SYMBOL(wimax_reset); 91EXPORT_SYMBOL(wimax_reset);
92 92
93 93
94static const 94static const struct nla_policy wimax_gnl_reset_policy[WIMAX_GNL_ATTR_MAX + 1] = {
95struct nla_policy wimax_gnl_reset_policy[WIMAX_GNL_ATTR_MAX + 1] = {
96 [WIMAX_GNL_RESET_IFIDX] = { 95 [WIMAX_GNL_RESET_IFIDX] = {
97 .type = NLA_U32, 96 .type = NLA_U32,
98 }, 97 },
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index ae752a64d920..e978c7136c97 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -410,8 +410,7 @@ void wimax_rfkill_rm(struct wimax_dev *wimax_dev)
410 * just query). 410 * just query).
411 */ 411 */
412 412
413static const 413static const struct nla_policy wimax_gnl_rfkill_policy[WIMAX_GNL_ATTR_MAX + 1] = {
414struct nla_policy wimax_gnl_rfkill_policy[WIMAX_GNL_ATTR_MAX + 1] = {
415 [WIMAX_GNL_RFKILL_IFIDX] = { 414 [WIMAX_GNL_RFKILL_IFIDX] = {
416 .type = NLA_U32, 415 .type = NLA_U32,
417 }, 416 },
diff --git a/net/wimax/op-state-get.c b/net/wimax/op-state-get.c
index a76b8fcb056d..11ad3356eb56 100644
--- a/net/wimax/op-state-get.c
+++ b/net/wimax/op-state-get.c
@@ -33,8 +33,7 @@
33#include "debug-levels.h" 33#include "debug-levels.h"
34 34
35 35
36static const 36static const struct nla_policy wimax_gnl_state_get_policy[WIMAX_GNL_ATTR_MAX + 1] = {
37struct nla_policy wimax_gnl_state_get_policy[WIMAX_GNL_ATTR_MAX + 1] = {
38 [WIMAX_GNL_STGET_IFIDX] = { 37 [WIMAX_GNL_STGET_IFIDX] = {
39 .type = NLA_U32, 38 .type = NLA_U32,
40 }, 39 },
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index c8866412f830..813e1eaea29b 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -75,8 +75,7 @@ MODULE_PARM_DESC(debug,
75 * close to where the data is generated. 75 * close to where the data is generated.
76 */ 76 */
77/* 77/*
78static const 78static const struct nla_policy wimax_gnl_re_status_change[WIMAX_GNL_ATTR_MAX + 1] = {
79struct nla_policy wimax_gnl_re_status_change[WIMAX_GNL_ATTR_MAX + 1] = {
80 [WIMAX_GNL_STCH_STATE_OLD] = { .type = NLA_U8 }, 79 [WIMAX_GNL_STCH_STATE_OLD] = { .type = NLA_U8 },
81 [WIMAX_GNL_STCH_STATE_NEW] = { .type = NLA_U8 }, 80 [WIMAX_GNL_STCH_STATE_NEW] = { .type = NLA_U8 },
82}; 81};
diff --git a/net/wireless/.gitignore b/net/wireless/.gitignore
new file mode 100644
index 000000000000..c33451b896d9
--- /dev/null
+++ b/net/wireless/.gitignore
@@ -0,0 +1 @@
regdb.c
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 90e93a5701aa..d0ee29063e5d 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -94,20 +94,21 @@ config CFG80211_DEBUGFS
94 94
95 If unsure, say N. 95 If unsure, say N.
96 96
97config WIRELESS_OLD_REGULATORY 97config CFG80211_INTERNAL_REGDB
98 bool "Old wireless static regulatory definitions" 98 bool "use statically compiled regulatory rules database" if EMBEDDED
99 default n 99 default n
100 depends on CFG80211 100 depends on CFG80211
101 ---help--- 101 ---help---
102 This option enables the old static regulatory information 102 This option generates an internal data structure representing
103 and uses it within the new framework. This option is available 103 the wireless regulatory rules described in net/wireless/db.txt
104 for historical reasons and it is advised to leave it off. 104 and includes code to query that database. This is an alternative
105 to using CRDA for defining regulatory rules for the kernel.
105 106
106 For details see: 107 For details see:
107 108
108 http://wireless.kernel.org/en/developers/Regulatory 109 http://wireless.kernel.org/en/developers/Regulatory
109 110
110 Say N and if you say Y, please tell us why. The default is N. 111 Most distributions have a CRDA package. So if unsure, say N.
111 112
112config CFG80211_WEXT 113config CFG80211_WEXT
113 bool "cfg80211 wireless extensions compatibility" 114 bool "cfg80211 wireless extensions compatibility"
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index f07c8dc7aab2..e77e508126fa 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -13,5 +13,11 @@ cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o
13cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o 13cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o
14cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o 14cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
15cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o 15cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o
16cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o
16 17
17ccflags-y += -D__CHECK_ENDIAN__ 18ccflags-y += -D__CHECK_ENDIAN__
19
20$(obj)/regdb.c: $(src)/db.txt $(src)/genregdb.awk
21 @$(AWK) -f $(srctree)/$(src)/genregdb.awk < $< > $@
22
23clean-files := regdb.c
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index a46ac6c9b365..bf1737fc9a7e 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -41,44 +41,57 @@ rdev_fixed_channel(struct cfg80211_registered_device *rdev,
41 return result; 41 return result;
42} 42}
43 43
44int rdev_set_freq(struct cfg80211_registered_device *rdev, 44struct ieee80211_channel *
45 struct wireless_dev *for_wdev, 45rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
46 int freq, enum nl80211_channel_type channel_type) 46 int freq, enum nl80211_channel_type channel_type)
47{ 47{
48 struct ieee80211_channel *chan; 48 struct ieee80211_channel *chan;
49 struct ieee80211_sta_ht_cap *ht_cap; 49 struct ieee80211_sta_ht_cap *ht_cap;
50 int result;
51
52 if (rdev_fixed_channel(rdev, for_wdev))
53 return -EBUSY;
54
55 if (!rdev->ops->set_channel)
56 return -EOPNOTSUPP;
57 50
58 chan = ieee80211_get_channel(&rdev->wiphy, freq); 51 chan = ieee80211_get_channel(&rdev->wiphy, freq);
59 52
60 /* Primary channel not allowed */ 53 /* Primary channel not allowed */
61 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) 54 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
62 return -EINVAL; 55 return NULL;
63 56
64 if (channel_type == NL80211_CHAN_HT40MINUS && 57 if (channel_type == NL80211_CHAN_HT40MINUS &&
65 chan->flags & IEEE80211_CHAN_NO_HT40MINUS) 58 chan->flags & IEEE80211_CHAN_NO_HT40MINUS)
66 return -EINVAL; 59 return NULL;
67 else if (channel_type == NL80211_CHAN_HT40PLUS && 60 else if (channel_type == NL80211_CHAN_HT40PLUS &&
68 chan->flags & IEEE80211_CHAN_NO_HT40PLUS) 61 chan->flags & IEEE80211_CHAN_NO_HT40PLUS)
69 return -EINVAL; 62 return NULL;
70 63
71 ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap; 64 ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap;
72 65
73 if (channel_type != NL80211_CHAN_NO_HT) { 66 if (channel_type != NL80211_CHAN_NO_HT) {
74 if (!ht_cap->ht_supported) 67 if (!ht_cap->ht_supported)
75 return -EINVAL; 68 return NULL;
76 69
77 if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) || 70 if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
78 ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT) 71 ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)
79 return -EINVAL; 72 return NULL;
80 } 73 }
81 74
75 return chan;
76}
77
78int rdev_set_freq(struct cfg80211_registered_device *rdev,
79 struct wireless_dev *for_wdev,
80 int freq, enum nl80211_channel_type channel_type)
81{
82 struct ieee80211_channel *chan;
83 int result;
84
85 if (rdev_fixed_channel(rdev, for_wdev))
86 return -EBUSY;
87
88 if (!rdev->ops->set_channel)
89 return -EOPNOTSUPP;
90
91 chan = rdev_freq_to_chan(rdev, freq, channel_type);
92 if (!chan)
93 return -EINVAL;
94
82 result = rdev->ops->set_channel(&rdev->wiphy, chan, channel_type); 95 result = rdev->ops->set_channel(&rdev->wiphy, chan, channel_type);
83 if (result) 96 if (result)
84 return result; 97 return result;
diff --git a/net/wireless/core.c b/net/wireless/core.c
index c2a2c563d21a..7fdb9409ad2a 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * This is the linux wireless configuration interface. 2 * This is the linux wireless configuration interface.
3 * 3 *
4 * Copyright 2006-2009 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6 6
7#include <linux/if.h> 7#include <linux/if.h>
@@ -31,15 +31,10 @@ MODULE_AUTHOR("Johannes Berg");
31MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
32MODULE_DESCRIPTION("wireless configuration support"); 32MODULE_DESCRIPTION("wireless configuration support");
33 33
34/* RCU might be appropriate here since we usually 34/* RCU-protected (and cfg80211_mutex for writers) */
35 * only read the list, and that can happen quite
36 * often because we need to do it for each command */
37LIST_HEAD(cfg80211_rdev_list); 35LIST_HEAD(cfg80211_rdev_list);
38int cfg80211_rdev_list_generation; 36int cfg80211_rdev_list_generation;
39 37
40/*
41 * This is used to protect the cfg80211_rdev_list
42 */
43DEFINE_MUTEX(cfg80211_mutex); 38DEFINE_MUTEX(cfg80211_mutex);
44 39
45/* for debugfs */ 40/* for debugfs */
@@ -402,6 +397,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
402 rdev->wiphy.retry_long = 4; 397 rdev->wiphy.retry_long = 4;
403 rdev->wiphy.frag_threshold = (u32) -1; 398 rdev->wiphy.frag_threshold = (u32) -1;
404 rdev->wiphy.rts_threshold = (u32) -1; 399 rdev->wiphy.rts_threshold = (u32) -1;
400 rdev->wiphy.coverage_class = 0;
405 401
406 return &rdev->wiphy; 402 return &rdev->wiphy;
407} 403}
@@ -417,6 +413,18 @@ int wiphy_register(struct wiphy *wiphy)
417 int i; 413 int i;
418 u16 ifmodes = wiphy->interface_modes; 414 u16 ifmodes = wiphy->interface_modes;
419 415
416 if (WARN_ON(wiphy->addresses && !wiphy->n_addresses))
417 return -EINVAL;
418
419 if (WARN_ON(wiphy->addresses &&
420 !is_zero_ether_addr(wiphy->perm_addr) &&
421 memcmp(wiphy->perm_addr, wiphy->addresses[0].addr,
422 ETH_ALEN)))
423 return -EINVAL;
424
425 if (wiphy->addresses)
426 memcpy(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN);
427
420 /* sanity check ifmodes */ 428 /* sanity check ifmodes */
421 WARN_ON(!ifmodes); 429 WARN_ON(!ifmodes);
422 ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1; 430 ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1;
@@ -476,7 +484,7 @@ int wiphy_register(struct wiphy *wiphy)
476 /* set up regulatory info */ 484 /* set up regulatory info */
477 wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE); 485 wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
478 486
479 list_add(&rdev->list, &cfg80211_rdev_list); 487 list_add_rcu(&rdev->list, &cfg80211_rdev_list);
480 cfg80211_rdev_list_generation++; 488 cfg80211_rdev_list_generation++;
481 489
482 mutex_unlock(&cfg80211_mutex); 490 mutex_unlock(&cfg80211_mutex);
@@ -553,7 +561,8 @@ void wiphy_unregister(struct wiphy *wiphy)
553 * it impossible to find from userspace. 561 * it impossible to find from userspace.
554 */ 562 */
555 debugfs_remove_recursive(rdev->wiphy.debugfsdir); 563 debugfs_remove_recursive(rdev->wiphy.debugfsdir);
556 list_del(&rdev->list); 564 list_del_rcu(&rdev->list);
565 synchronize_rcu();
557 566
558 /* 567 /*
559 * Try to grab rdev->mtx. If a command is still in progress, 568 * Try to grab rdev->mtx. If a command is still in progress,
@@ -668,8 +677,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
668 INIT_WORK(&wdev->cleanup_work, wdev_cleanup_work); 677 INIT_WORK(&wdev->cleanup_work, wdev_cleanup_work);
669 INIT_LIST_HEAD(&wdev->event_list); 678 INIT_LIST_HEAD(&wdev->event_list);
670 spin_lock_init(&wdev->event_lock); 679 spin_lock_init(&wdev->event_lock);
680 INIT_LIST_HEAD(&wdev->action_registrations);
681 spin_lock_init(&wdev->action_registrations_lock);
682
671 mutex_lock(&rdev->devlist_mtx); 683 mutex_lock(&rdev->devlist_mtx);
672 list_add(&wdev->list, &rdev->netdev_list); 684 list_add_rcu(&wdev->list, &rdev->netdev_list);
673 rdev->devlist_generation++; 685 rdev->devlist_generation++;
674 /* can only change netns with wiphy */ 686 /* can only change netns with wiphy */
675 dev->features |= NETIF_F_NETNS_LOCAL; 687 dev->features |= NETIF_F_NETNS_LOCAL;
@@ -686,19 +698,21 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
686 wdev->wext.default_key = -1; 698 wdev->wext.default_key = -1;
687 wdev->wext.default_mgmt_key = -1; 699 wdev->wext.default_mgmt_key = -1;
688 wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; 700 wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
701#endif
702
689 if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT) 703 if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT)
690 wdev->wext.ps = true; 704 wdev->ps = true;
691 else 705 else
692 wdev->wext.ps = false; 706 wdev->ps = false;
693 wdev->wext.ps_timeout = 100; 707 wdev->ps_timeout = 100;
694 if (rdev->ops->set_power_mgmt) 708 if (rdev->ops->set_power_mgmt)
695 if (rdev->ops->set_power_mgmt(wdev->wiphy, dev, 709 if (rdev->ops->set_power_mgmt(wdev->wiphy, dev,
696 wdev->wext.ps, 710 wdev->ps,
697 wdev->wext.ps_timeout)) { 711 wdev->ps_timeout)) {
698 /* assume this means it's off */ 712 /* assume this means it's off */
699 wdev->wext.ps = false; 713 wdev->ps = false;
700 } 714 }
701#endif 715
702 if (!dev->ethtool_ops) 716 if (!dev->ethtool_ops)
703 dev->ethtool_ops = &cfg80211_ethtool_ops; 717 dev->ethtool_ops = &cfg80211_ethtool_ops;
704 718
@@ -745,9 +759,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
745 mutex_unlock(&rdev->devlist_mtx); 759 mutex_unlock(&rdev->devlist_mtx);
746 dev_put(dev); 760 dev_put(dev);
747 } 761 }
748#ifdef CONFIG_CFG80211_WEXT
749 cfg80211_lock_rdev(rdev); 762 cfg80211_lock_rdev(rdev);
750 mutex_lock(&rdev->devlist_mtx); 763 mutex_lock(&rdev->devlist_mtx);
764#ifdef CONFIG_CFG80211_WEXT
751 wdev_lock(wdev); 765 wdev_lock(wdev);
752 switch (wdev->iftype) { 766 switch (wdev->iftype) {
753 case NL80211_IFTYPE_ADHOC: 767 case NL80211_IFTYPE_ADHOC:
@@ -760,10 +774,10 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
760 break; 774 break;
761 } 775 }
762 wdev_unlock(wdev); 776 wdev_unlock(wdev);
777#endif
763 rdev->opencount++; 778 rdev->opencount++;
764 mutex_unlock(&rdev->devlist_mtx); 779 mutex_unlock(&rdev->devlist_mtx);
765 cfg80211_unlock_rdev(rdev); 780 cfg80211_unlock_rdev(rdev);
766#endif
767 break; 781 break;
768 case NETDEV_UNREGISTER: 782 case NETDEV_UNREGISTER:
769 /* 783 /*
@@ -781,13 +795,22 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
781 */ 795 */
782 if (!list_empty(&wdev->list)) { 796 if (!list_empty(&wdev->list)) {
783 sysfs_remove_link(&dev->dev.kobj, "phy80211"); 797 sysfs_remove_link(&dev->dev.kobj, "phy80211");
784 list_del_init(&wdev->list); 798 list_del_rcu(&wdev->list);
785 rdev->devlist_generation++; 799 rdev->devlist_generation++;
800 cfg80211_mlme_purge_actions(wdev);
786#ifdef CONFIG_CFG80211_WEXT 801#ifdef CONFIG_CFG80211_WEXT
787 kfree(wdev->wext.keys); 802 kfree(wdev->wext.keys);
788#endif 803#endif
789 } 804 }
790 mutex_unlock(&rdev->devlist_mtx); 805 mutex_unlock(&rdev->devlist_mtx);
806 /*
807 * synchronise (so that we won't find this netdev
808 * from other code any more) and then clear the list
809 * head so that the above code can safely check for
810 * !list_empty() to avoid double-cleanup.
811 */
812 synchronize_rcu();
813 INIT_LIST_HEAD(&wdev->list);
791 break; 814 break;
792 case NETDEV_PRE_UP: 815 case NETDEV_PRE_UP:
793 if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) 816 if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 4ef3efc94106..d52da913145a 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Wireless configuration interface internals. 2 * Wireless configuration interface internals.
3 * 3 *
4 * Copyright 2006-2009 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6#ifndef __NET_WIRELESS_CORE_H 6#ifndef __NET_WIRELESS_CORE_H
7#define __NET_WIRELESS_CORE_H 7#define __NET_WIRELESS_CORE_H
@@ -48,6 +48,7 @@ struct cfg80211_registered_device {
48 48
49 /* associate netdev list */ 49 /* associate netdev list */
50 struct mutex devlist_mtx; 50 struct mutex devlist_mtx;
51 /* protected by devlist_mtx or RCU */
51 struct list_head netdev_list; 52 struct list_head netdev_list;
52 int devlist_generation; 53 int devlist_generation;
53 int opencount; /* also protected by devlist_mtx */ 54 int opencount; /* also protected by devlist_mtx */
@@ -111,7 +112,8 @@ struct cfg80211_internal_bss {
111 unsigned long ts; 112 unsigned long ts;
112 struct kref ref; 113 struct kref ref;
113 atomic_t hold; 114 atomic_t hold;
114 bool ies_allocated; 115 bool beacon_ies_allocated;
116 bool proberesp_ies_allocated;
115 117
116 /* must be last because of priv member */ 118 /* must be last because of priv member */
117 struct cfg80211_bss pub; 119 struct cfg80211_bss pub;
@@ -327,6 +329,15 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
327 const u8 *resp_ie, size_t resp_ie_len, 329 const u8 *resp_ie, size_t resp_ie_len,
328 u16 status, bool wextev, 330 u16 status, bool wextev,
329 struct cfg80211_bss *bss); 331 struct cfg80211_bss *bss);
332int cfg80211_mlme_register_action(struct wireless_dev *wdev, u32 snd_pid,
333 const u8 *match_data, int match_len);
334void cfg80211_mlme_unregister_actions(struct wireless_dev *wdev, u32 nlpid);
335void cfg80211_mlme_purge_actions(struct wireless_dev *wdev);
336int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
337 struct net_device *dev,
338 struct ieee80211_channel *chan,
339 enum nl80211_channel_type channel_type,
340 const u8 *buf, size_t len, u64 *cookie);
330 341
331/* SME */ 342/* SME */
332int __cfg80211_connect(struct cfg80211_registered_device *rdev, 343int __cfg80211_connect(struct cfg80211_registered_device *rdev,
@@ -374,10 +385,15 @@ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
374struct ieee80211_channel * 385struct ieee80211_channel *
375rdev_fixed_channel(struct cfg80211_registered_device *rdev, 386rdev_fixed_channel(struct cfg80211_registered_device *rdev,
376 struct wireless_dev *for_wdev); 387 struct wireless_dev *for_wdev);
388struct ieee80211_channel *
389rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
390 int freq, enum nl80211_channel_type channel_type);
377int rdev_set_freq(struct cfg80211_registered_device *rdev, 391int rdev_set_freq(struct cfg80211_registered_device *rdev,
378 struct wireless_dev *for_wdev, 392 struct wireless_dev *for_wdev,
379 int freq, enum nl80211_channel_type channel_type); 393 int freq, enum nl80211_channel_type channel_type);
380 394
395u16 cfg80211_calculate_bitrate(struct rate_info *rate);
396
381#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS 397#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
382#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond) 398#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
383#else 399#else
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
new file mode 100644
index 000000000000..a2fc3a09ccdc
--- /dev/null
+++ b/net/wireless/db.txt
@@ -0,0 +1,17 @@
1#
2# This file is a placeholder to prevent accidental build breakage if someone
3# enables CONFIG_CFG80211_INTERNAL_REGDB. Almost no one actually needs to
4# enable that build option.
5#
6# You should be using CRDA instead. It is even better if you use the CRDA
7# package provided by your distribution, since they will probably keep it
8# up-to-date on your behalf.
9#
10# If you _really_ intend to use CONFIG_CFG80211_INTERNAL_REGDB then you will
11# need to replace this file with one containing appropriately formatted
12# regulatory rules that cover the regulatory domains you will be using. Your
13# best option is to extract the db.txt file from the wireless-regdb git
14# repository:
15#
16# git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-regdb.git
17#
diff --git a/net/wireless/genregdb.awk b/net/wireless/genregdb.awk
new file mode 100644
index 000000000000..3cc9e69880a8
--- /dev/null
+++ b/net/wireless/genregdb.awk
@@ -0,0 +1,118 @@
1#!/usr/bin/awk -f
2#
3# genregdb.awk -- generate regdb.c from db.txt
4#
5# Actually, it reads from stdin (presumed to be db.txt) and writes
6# to stdout (presumed to be regdb.c), but close enough...
7#
8# Copyright 2009 John W. Linville <linville@tuxdriver.com>
9#
10# This program is free software; you can redistribute it and/or modify
11# it under the terms of the GNU General Public License version 2 as
12# published by the Free Software Foundation.
13#
14
15BEGIN {
16 active = 0
17 rules = 0;
18 print "/*"
19 print " * DO NOT EDIT -- file generated from data in db.txt"
20 print " */"
21 print ""
22 print "#include <linux/nl80211.h>"
23 print "#include <net/cfg80211.h>"
24 print ""
25 regdb = "const struct ieee80211_regdomain *reg_regdb[] = {\n"
26}
27
28/^[ \t]*#/ {
29 # Ignore
30}
31
32!active && /^[ \t]*$/ {
33 # Ignore
34}
35
36!active && /country/ {
37 country=$2
38 sub(/:/, "", country)
39 printf "static const struct ieee80211_regdomain regdom_%s = {\n", country
40 printf "\t.alpha2 = \"%s\",\n", country
41 printf "\t.reg_rules = {\n"
42 active = 1
43 regdb = regdb "\t&regdom_" country ",\n"
44}
45
46active && /^[ \t]*\(/ {
47 start = $1
48 sub(/\(/, "", start)
49 end = $3
50 bw = $5
51 sub(/\),/, "", bw)
52 gain = $6
53 sub(/\(/, "", gain)
54 sub(/,/, "", gain)
55 power = $7
56 sub(/\)/, "", power)
57 sub(/,/, "", power)
58 # power might be in mW...
59 units = $8
60 sub(/\)/, "", units)
61 sub(/,/, "", units)
62 if (units == "mW") {
63 if (power == 100) {
64 power = 20
65 } else if (power == 200) {
66 power = 23
67 } else if (power == 500) {
68 power = 27
69 } else if (power == 1000) {
70 power = 30
71 } else {
72 print "Unknown power value in database!"
73 }
74 }
75 flagstr = ""
76 for (i=8; i<=NF; i++)
77 flagstr = flagstr $i
78 split(flagstr, flagarray, ",")
79 flags = ""
80 for (arg in flagarray) {
81 if (flagarray[arg] == "NO-OFDM") {
82 flags = flags "\n\t\t\tNL80211_RRF_NO_OFDM | "
83 } else if (flagarray[arg] == "NO-CCK") {
84 flags = flags "\n\t\t\tNL80211_RRF_NO_CCK | "
85 } else if (flagarray[arg] == "NO-INDOOR") {
86 flags = flags "\n\t\t\tNL80211_RRF_NO_INDOOR | "
87 } else if (flagarray[arg] == "NO-OUTDOOR") {
88 flags = flags "\n\t\t\tNL80211_RRF_NO_OUTDOOR | "
89 } else if (flagarray[arg] == "DFS") {
90 flags = flags "\n\t\t\tNL80211_RRF_DFS | "
91 } else if (flagarray[arg] == "PTP-ONLY") {
92 flags = flags "\n\t\t\tNL80211_RRF_PTP_ONLY | "
93 } else if (flagarray[arg] == "PTMP-ONLY") {
94 flags = flags "\n\t\t\tNL80211_RRF_PTMP_ONLY | "
95 } else if (flagarray[arg] == "PASSIVE-SCAN") {
96 flags = flags "\n\t\t\tNL80211_RRF_PASSIVE_SCAN | "
97 } else if (flagarray[arg] == "NO-IBSS") {
98 flags = flags "\n\t\t\tNL80211_RRF_NO_IBSS | "
99 }
100 }
101 flags = flags "0"
102 printf "\t\tREG_RULE(%d, %d, %d, %d, %d, %s),\n", start, end, bw, gain, power, flags
103 rules++
104}
105
106active && /^[ \t]*$/ {
107 active = 0
108 printf "\t},\n"
109 printf "\t.n_reg_rules = %d\n", rules
110 printf "};\n\n"
111 rules = 0;
112}
113
114END {
115 print regdb "};"
116 print ""
117 print "int reg_regdb_size = ARRAY_SIZE(reg_regdb);"
118}
diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c
index 2301dc1edc4c..b7fa31d5fd13 100644
--- a/net/wireless/lib80211_crypt_ccmp.c
+++ b/net/wireless/lib80211_crypt_ccmp.c
@@ -237,7 +237,6 @@ static int lib80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
237 return -1; 237 return -1;
238 238
239 pos = skb->data + hdr_len + CCMP_HDR_LEN; 239 pos = skb->data + hdr_len + CCMP_HDR_LEN;
240 mic = skb_put(skb, CCMP_MIC_LEN);
241 hdr = (struct ieee80211_hdr *)skb->data; 240 hdr = (struct ieee80211_hdr *)skb->data;
242 ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0); 241 ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0);
243 242
@@ -257,6 +256,7 @@ static int lib80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
257 pos += len; 256 pos += len;
258 } 257 }
259 258
259 mic = skb_put(skb, CCMP_MIC_LEN);
260 for (i = 0; i < CCMP_MIC_LEN; i++) 260 for (i = 0; i < CCMP_MIC_LEN; i++)
261 mic[i] = b[i] ^ s0[i]; 261 mic[i] = b[i] ^ s0[i];
262 262
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index c36287399d7e..8cbdb32ff316 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -36,6 +36,8 @@ MODULE_AUTHOR("Jouni Malinen");
36MODULE_DESCRIPTION("lib80211 crypt: TKIP"); 36MODULE_DESCRIPTION("lib80211 crypt: TKIP");
37MODULE_LICENSE("GPL"); 37MODULE_LICENSE("GPL");
38 38
39#define TKIP_HDR_LEN 8
40
39struct lib80211_tkip_data { 41struct lib80211_tkip_data {
40#define TKIP_KEY_LEN 32 42#define TKIP_KEY_LEN 32
41 u8 key[TKIP_KEY_LEN]; 43 u8 key[TKIP_KEY_LEN];
@@ -314,13 +316,12 @@ static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
314 u8 * rc4key, int keylen, void *priv) 316 u8 * rc4key, int keylen, void *priv)
315{ 317{
316 struct lib80211_tkip_data *tkey = priv; 318 struct lib80211_tkip_data *tkey = priv;
317 int len;
318 u8 *pos; 319 u8 *pos;
319 struct ieee80211_hdr *hdr; 320 struct ieee80211_hdr *hdr;
320 321
321 hdr = (struct ieee80211_hdr *)skb->data; 322 hdr = (struct ieee80211_hdr *)skb->data;
322 323
323 if (skb_headroom(skb) < 8 || skb->len < hdr_len) 324 if (skb_headroom(skb) < TKIP_HDR_LEN || skb->len < hdr_len)
324 return -1; 325 return -1;
325 326
326 if (rc4key == NULL || keylen < 16) 327 if (rc4key == NULL || keylen < 16)
@@ -333,9 +334,8 @@ static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
333 } 334 }
334 tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16); 335 tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16);
335 336
336 len = skb->len - hdr_len; 337 pos = skb_push(skb, TKIP_HDR_LEN);
337 pos = skb_push(skb, 8); 338 memmove(pos, pos + TKIP_HDR_LEN, hdr_len);
338 memmove(pos, pos + 8, hdr_len);
339 pos += hdr_len; 339 pos += hdr_len;
340 340
341 *pos++ = *rc4key; 341 *pos++ = *rc4key;
@@ -353,7 +353,7 @@ static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
353 tkey->tx_iv32++; 353 tkey->tx_iv32++;
354 } 354 }
355 355
356 return 8; 356 return TKIP_HDR_LEN;
357} 357}
358 358
359static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) 359static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
@@ -384,9 +384,8 @@ static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
384 if ((lib80211_tkip_hdr(skb, hdr_len, rc4key, 16, priv)) < 0) 384 if ((lib80211_tkip_hdr(skb, hdr_len, rc4key, 16, priv)) < 0)
385 return -1; 385 return -1;
386 386
387 icv = skb_put(skb, 4);
388
389 crc = ~crc32_le(~0, pos, len); 387 crc = ~crc32_le(~0, pos, len);
388 icv = skb_put(skb, 4);
390 icv[0] = crc; 389 icv[0] = crc;
391 icv[1] = crc >> 8; 390 icv[1] = crc >> 8;
392 icv[2] = crc >> 16; 391 icv[2] = crc >> 16;
@@ -434,7 +433,7 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
434 return -1; 433 return -1;
435 } 434 }
436 435
437 if (skb->len < hdr_len + 8 + 4) 436 if (skb->len < hdr_len + TKIP_HDR_LEN + 4)
438 return -1; 437 return -1;
439 438
440 pos = skb->data + hdr_len; 439 pos = skb->data + hdr_len;
@@ -462,7 +461,7 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
462 } 461 }
463 iv16 = (pos[0] << 8) | pos[2]; 462 iv16 = (pos[0] << 8) | pos[2];
464 iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24); 463 iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24);
465 pos += 8; 464 pos += TKIP_HDR_LEN;
466 465
467 if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) { 466 if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) {
468#ifdef CONFIG_LIB80211_DEBUG 467#ifdef CONFIG_LIB80211_DEBUG
@@ -523,8 +522,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
523 tkey->rx_iv16_new = iv16; 522 tkey->rx_iv16_new = iv16;
524 523
525 /* Remove IV and ICV */ 524 /* Remove IV and ICV */
526 memmove(skb->data + 8, skb->data, hdr_len); 525 memmove(skb->data + TKIP_HDR_LEN, skb->data, hdr_len);
527 skb_pull(skb, 8); 526 skb_pull(skb, TKIP_HDR_LEN);
528 skb_trim(skb, skb->len - 4); 527 skb_trim(skb, skb->len - 4);
529 528
530 return keyidx; 529 return keyidx;
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 1001db4912f7..62bc8855e123 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -93,7 +93,18 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
93 } 93 }
94 } 94 }
95 95
96 WARN_ON(!bss); 96 /*
97 * We might be coming here because the driver reported
98 * a successful association at the same time as the
99 * user requested a deauth. In that case, we will have
100 * removed the BSS from the auth_bsses list due to the
101 * deauth request when the assoc response makes it. If
102 * the two code paths acquire the lock the other way
103 * around, that's just the standard situation of a
104 * deauth being requested while connected.
105 */
106 if (!bss)
107 goto out;
97 } else if (wdev->conn) { 108 } else if (wdev->conn) {
98 cfg80211_sme_failed_assoc(wdev); 109 cfg80211_sme_failed_assoc(wdev);
99 /* 110 /*
@@ -137,22 +148,23 @@ void __cfg80211_send_deauth(struct net_device *dev,
137 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; 148 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
138 const u8 *bssid = mgmt->bssid; 149 const u8 *bssid = mgmt->bssid;
139 int i; 150 int i;
151 bool found = false;
140 152
141 ASSERT_WDEV_LOCK(wdev); 153 ASSERT_WDEV_LOCK(wdev);
142 154
143 nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL);
144
145 if (wdev->current_bss && 155 if (wdev->current_bss &&
146 memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { 156 memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
147 cfg80211_unhold_bss(wdev->current_bss); 157 cfg80211_unhold_bss(wdev->current_bss);
148 cfg80211_put_bss(&wdev->current_bss->pub); 158 cfg80211_put_bss(&wdev->current_bss->pub);
149 wdev->current_bss = NULL; 159 wdev->current_bss = NULL;
160 found = true;
150 } else for (i = 0; i < MAX_AUTH_BSSES; i++) { 161 } else for (i = 0; i < MAX_AUTH_BSSES; i++) {
151 if (wdev->auth_bsses[i] && 162 if (wdev->auth_bsses[i] &&
152 memcmp(wdev->auth_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) { 163 memcmp(wdev->auth_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) {
153 cfg80211_unhold_bss(wdev->auth_bsses[i]); 164 cfg80211_unhold_bss(wdev->auth_bsses[i]);
154 cfg80211_put_bss(&wdev->auth_bsses[i]->pub); 165 cfg80211_put_bss(&wdev->auth_bsses[i]->pub);
155 wdev->auth_bsses[i] = NULL; 166 wdev->auth_bsses[i] = NULL;
167 found = true;
156 break; 168 break;
157 } 169 }
158 if (wdev->authtry_bsses[i] && 170 if (wdev->authtry_bsses[i] &&
@@ -160,10 +172,16 @@ void __cfg80211_send_deauth(struct net_device *dev,
160 cfg80211_unhold_bss(wdev->authtry_bsses[i]); 172 cfg80211_unhold_bss(wdev->authtry_bsses[i]);
161 cfg80211_put_bss(&wdev->authtry_bsses[i]->pub); 173 cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
162 wdev->authtry_bsses[i] = NULL; 174 wdev->authtry_bsses[i] = NULL;
175 found = true;
163 break; 176 break;
164 } 177 }
165 } 178 }
166 179
180 if (!found)
181 return;
182
183 nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL);
184
167 if (wdev->sme_state == CFG80211_SME_CONNECTED) { 185 if (wdev->sme_state == CFG80211_SME_CONNECTED) {
168 u16 reason_code; 186 u16 reason_code;
169 bool from_ap; 187 bool from_ap;
@@ -673,3 +691,206 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
673 } 691 }
674 } 692 }
675} 693}
694
695void cfg80211_ready_on_channel(struct net_device *dev, u64 cookie,
696 struct ieee80211_channel *chan,
697 enum nl80211_channel_type channel_type,
698 unsigned int duration, gfp_t gfp)
699{
700 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
701 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
702
703 nl80211_send_remain_on_channel(rdev, dev, cookie, chan, channel_type,
704 duration, gfp);
705}
706EXPORT_SYMBOL(cfg80211_ready_on_channel);
707
708void cfg80211_remain_on_channel_expired(struct net_device *dev,
709 u64 cookie,
710 struct ieee80211_channel *chan,
711 enum nl80211_channel_type channel_type,
712 gfp_t gfp)
713{
714 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
715 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
716
717 nl80211_send_remain_on_channel_cancel(rdev, dev, cookie, chan,
718 channel_type, gfp);
719}
720EXPORT_SYMBOL(cfg80211_remain_on_channel_expired);
721
722void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
723 struct station_info *sinfo, gfp_t gfp)
724{
725 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
726 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
727
728 nl80211_send_sta_event(rdev, dev, mac_addr, sinfo, gfp);
729}
730EXPORT_SYMBOL(cfg80211_new_sta);
731
732struct cfg80211_action_registration {
733 struct list_head list;
734
735 u32 nlpid;
736
737 int match_len;
738
739 u8 match[];
740};
741
742int cfg80211_mlme_register_action(struct wireless_dev *wdev, u32 snd_pid,
743 const u8 *match_data, int match_len)
744{
745 struct cfg80211_action_registration *reg, *nreg;
746 int err = 0;
747
748 nreg = kzalloc(sizeof(*reg) + match_len, GFP_KERNEL);
749 if (!nreg)
750 return -ENOMEM;
751
752 spin_lock_bh(&wdev->action_registrations_lock);
753
754 list_for_each_entry(reg, &wdev->action_registrations, list) {
755 int mlen = min(match_len, reg->match_len);
756
757 if (memcmp(reg->match, match_data, mlen) == 0) {
758 err = -EALREADY;
759 break;
760 }
761 }
762
763 if (err) {
764 kfree(nreg);
765 goto out;
766 }
767
768 memcpy(nreg->match, match_data, match_len);
769 nreg->match_len = match_len;
770 nreg->nlpid = snd_pid;
771 list_add(&nreg->list, &wdev->action_registrations);
772
773 out:
774 spin_unlock_bh(&wdev->action_registrations_lock);
775 return err;
776}
777
778void cfg80211_mlme_unregister_actions(struct wireless_dev *wdev, u32 nlpid)
779{
780 struct cfg80211_action_registration *reg, *tmp;
781
782 spin_lock_bh(&wdev->action_registrations_lock);
783
784 list_for_each_entry_safe(reg, tmp, &wdev->action_registrations, list) {
785 if (reg->nlpid == nlpid) {
786 list_del(&reg->list);
787 kfree(reg);
788 }
789 }
790
791 spin_unlock_bh(&wdev->action_registrations_lock);
792}
793
794void cfg80211_mlme_purge_actions(struct wireless_dev *wdev)
795{
796 struct cfg80211_action_registration *reg, *tmp;
797
798 spin_lock_bh(&wdev->action_registrations_lock);
799
800 list_for_each_entry_safe(reg, tmp, &wdev->action_registrations, list) {
801 list_del(&reg->list);
802 kfree(reg);
803 }
804
805 spin_unlock_bh(&wdev->action_registrations_lock);
806}
807
808int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
809 struct net_device *dev,
810 struct ieee80211_channel *chan,
811 enum nl80211_channel_type channel_type,
812 const u8 *buf, size_t len, u64 *cookie)
813{
814 struct wireless_dev *wdev = dev->ieee80211_ptr;
815 const struct ieee80211_mgmt *mgmt;
816
817 if (rdev->ops->action == NULL)
818 return -EOPNOTSUPP;
819 if (len < 24 + 1)
820 return -EINVAL;
821
822 mgmt = (const struct ieee80211_mgmt *) buf;
823 if (!ieee80211_is_action(mgmt->frame_control))
824 return -EINVAL;
825 if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) {
826 /* Verify that we are associated with the destination AP */
827 if (!wdev->current_bss ||
828 memcmp(wdev->current_bss->pub.bssid, mgmt->bssid,
829 ETH_ALEN) != 0 ||
830 memcmp(wdev->current_bss->pub.bssid, mgmt->da,
831 ETH_ALEN) != 0)
832 return -ENOTCONN;
833 }
834
835 if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0)
836 return -EINVAL;
837
838 /* Transmit the Action frame as requested by user space */
839 return rdev->ops->action(&rdev->wiphy, dev, chan, channel_type,
840 buf, len, cookie);
841}
842
843bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf,
844 size_t len, gfp_t gfp)
845{
846 struct wireless_dev *wdev = dev->ieee80211_ptr;
847 struct wiphy *wiphy = wdev->wiphy;
848 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
849 struct cfg80211_action_registration *reg;
850 const u8 *action_data;
851 int action_data_len;
852 bool result = false;
853
854 /* frame length - min size excluding category */
855 action_data_len = len - (IEEE80211_MIN_ACTION_SIZE - 1);
856
857 /* action data starts with category */
858 action_data = buf + IEEE80211_MIN_ACTION_SIZE - 1;
859
860 spin_lock_bh(&wdev->action_registrations_lock);
861
862 list_for_each_entry(reg, &wdev->action_registrations, list) {
863 if (reg->match_len > action_data_len)
864 continue;
865
866 if (memcmp(reg->match, action_data, reg->match_len))
867 continue;
868
869 /* found match! */
870
871 /* Indicate the received Action frame to user space */
872 if (nl80211_send_action(rdev, dev, reg->nlpid, freq,
873 buf, len, gfp))
874 continue;
875
876 result = true;
877 break;
878 }
879
880 spin_unlock_bh(&wdev->action_registrations_lock);
881
882 return result;
883}
884EXPORT_SYMBOL(cfg80211_rx_action);
885
886void cfg80211_action_tx_status(struct net_device *dev, u64 cookie,
887 const u8 *buf, size_t len, bool ack, gfp_t gfp)
888{
889 struct wireless_dev *wdev = dev->ieee80211_ptr;
890 struct wiphy *wiphy = wdev->wiphy;
891 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
892
893 /* Indicate TX status of the Action frame to user space */
894 nl80211_send_action_tx_status(rdev, dev, cookie, buf, len, ack, gfp);
895}
896EXPORT_SYMBOL(cfg80211_action_tx_status);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index a6028433e3a0..e447db04cf76 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * This is the new netlink-based wireless configuration interface. 2 * This is the new netlink-based wireless configuration interface.
3 * 3 *
4 * Copyright 2006-2009 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6 6
7#include <linux/if.h> 7#include <linux/if.h>
@@ -58,7 +58,7 @@ static int get_rdev_dev_by_info_ifindex(struct genl_info *info,
58} 58}
59 59
60/* policy for the attributes */ 60/* policy for the attributes */
61static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = { 61static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
62 [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, 62 [NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
63 [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING, 63 [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING,
64 .len = 20-1 }, 64 .len = 20-1 },
@@ -69,6 +69,7 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
69 [NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 }, 69 [NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 },
70 [NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 }, 70 [NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 },
71 [NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 }, 71 [NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 },
72 [NL80211_ATTR_WIPHY_COVERAGE_CLASS] = { .type = NLA_U8 },
72 73
73 [NL80211_ATTR_IFTYPE] = { .type = NLA_U32 }, 74 [NL80211_ATTR_IFTYPE] = { .type = NLA_U32 },
74 [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, 75 [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
@@ -141,11 +142,17 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
141 [NL80211_ATTR_4ADDR] = { .type = NLA_U8 }, 142 [NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
142 [NL80211_ATTR_PMKID] = { .type = NLA_BINARY, 143 [NL80211_ATTR_PMKID] = { .type = NLA_BINARY,
143 .len = WLAN_PMKID_LEN }, 144 .len = WLAN_PMKID_LEN },
145 [NL80211_ATTR_DURATION] = { .type = NLA_U32 },
146 [NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
147 [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
148 [NL80211_ATTR_FRAME] = { .type = NLA_BINARY,
149 .len = IEEE80211_MAX_DATA_LEN },
150 [NL80211_ATTR_FRAME_MATCH] = { .type = NLA_BINARY, },
151 [NL80211_ATTR_PS_STATE] = { .type = NLA_U32 },
144}; 152};
145 153
146/* policy for the attributes */ 154/* policy for the attributes */
147static struct nla_policy 155static const struct nla_policy nl80211_key_policy[NL80211_KEY_MAX + 1] = {
148nl80211_key_policy[NL80211_KEY_MAX + 1] __read_mostly = {
149 [NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN }, 156 [NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN },
150 [NL80211_KEY_IDX] = { .type = NLA_U8 }, 157 [NL80211_KEY_IDX] = { .type = NLA_U8 },
151 [NL80211_KEY_CIPHER] = { .type = NLA_U32 }, 158 [NL80211_KEY_CIPHER] = { .type = NLA_U32 },
@@ -442,6 +449,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
442 dev->wiphy.frag_threshold); 449 dev->wiphy.frag_threshold);
443 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD, 450 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
444 dev->wiphy.rts_threshold); 451 dev->wiphy.rts_threshold);
452 NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
453 dev->wiphy.coverage_class);
445 454
446 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, 455 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
447 dev->wiphy.max_scan_ssids); 456 dev->wiphy.max_scan_ssids);
@@ -569,6 +578,9 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
569 CMD(set_pmksa, SET_PMKSA); 578 CMD(set_pmksa, SET_PMKSA);
570 CMD(del_pmksa, DEL_PMKSA); 579 CMD(del_pmksa, DEL_PMKSA);
571 CMD(flush_pmksa, FLUSH_PMKSA); 580 CMD(flush_pmksa, FLUSH_PMKSA);
581 CMD(remain_on_channel, REMAIN_ON_CHANNEL);
582 CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
583 CMD(action, ACTION);
572 if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) { 584 if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
573 i++; 585 i++;
574 NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS); 586 NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS);
@@ -681,6 +693,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
681 u32 changed; 693 u32 changed;
682 u8 retry_short = 0, retry_long = 0; 694 u8 retry_short = 0, retry_long = 0;
683 u32 frag_threshold = 0, rts_threshold = 0; 695 u32 frag_threshold = 0, rts_threshold = 0;
696 u8 coverage_class = 0;
684 697
685 rtnl_lock(); 698 rtnl_lock();
686 699
@@ -803,9 +816,16 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
803 changed |= WIPHY_PARAM_RTS_THRESHOLD; 816 changed |= WIPHY_PARAM_RTS_THRESHOLD;
804 } 817 }
805 818
819 if (info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]) {
820 coverage_class = nla_get_u8(
821 info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]);
822 changed |= WIPHY_PARAM_COVERAGE_CLASS;
823 }
824
806 if (changed) { 825 if (changed) {
807 u8 old_retry_short, old_retry_long; 826 u8 old_retry_short, old_retry_long;
808 u32 old_frag_threshold, old_rts_threshold; 827 u32 old_frag_threshold, old_rts_threshold;
828 u8 old_coverage_class;
809 829
810 if (!rdev->ops->set_wiphy_params) { 830 if (!rdev->ops->set_wiphy_params) {
811 result = -EOPNOTSUPP; 831 result = -EOPNOTSUPP;
@@ -816,6 +836,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
816 old_retry_long = rdev->wiphy.retry_long; 836 old_retry_long = rdev->wiphy.retry_long;
817 old_frag_threshold = rdev->wiphy.frag_threshold; 837 old_frag_threshold = rdev->wiphy.frag_threshold;
818 old_rts_threshold = rdev->wiphy.rts_threshold; 838 old_rts_threshold = rdev->wiphy.rts_threshold;
839 old_coverage_class = rdev->wiphy.coverage_class;
819 840
820 if (changed & WIPHY_PARAM_RETRY_SHORT) 841 if (changed & WIPHY_PARAM_RETRY_SHORT)
821 rdev->wiphy.retry_short = retry_short; 842 rdev->wiphy.retry_short = retry_short;
@@ -825,6 +846,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
825 rdev->wiphy.frag_threshold = frag_threshold; 846 rdev->wiphy.frag_threshold = frag_threshold;
826 if (changed & WIPHY_PARAM_RTS_THRESHOLD) 847 if (changed & WIPHY_PARAM_RTS_THRESHOLD)
827 rdev->wiphy.rts_threshold = rts_threshold; 848 rdev->wiphy.rts_threshold = rts_threshold;
849 if (changed & WIPHY_PARAM_COVERAGE_CLASS)
850 rdev->wiphy.coverage_class = coverage_class;
828 851
829 result = rdev->ops->set_wiphy_params(&rdev->wiphy, changed); 852 result = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
830 if (result) { 853 if (result) {
@@ -832,6 +855,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
832 rdev->wiphy.retry_long = old_retry_long; 855 rdev->wiphy.retry_long = old_retry_long;
833 rdev->wiphy.frag_threshold = old_frag_threshold; 856 rdev->wiphy.frag_threshold = old_frag_threshold;
834 rdev->wiphy.rts_threshold = old_rts_threshold; 857 rdev->wiphy.rts_threshold = old_rts_threshold;
858 rdev->wiphy.coverage_class = old_coverage_class;
835 } 859 }
836 } 860 }
837 861
@@ -1637,42 +1661,9 @@ static int parse_station_flags(struct genl_info *info,
1637 return 0; 1661 return 0;
1638} 1662}
1639 1663
1640static u16 nl80211_calculate_bitrate(struct rate_info *rate)
1641{
1642 int modulation, streams, bitrate;
1643
1644 if (!(rate->flags & RATE_INFO_FLAGS_MCS))
1645 return rate->legacy;
1646
1647 /* the formula below does only work for MCS values smaller than 32 */
1648 if (rate->mcs >= 32)
1649 return 0;
1650
1651 modulation = rate->mcs & 7;
1652 streams = (rate->mcs >> 3) + 1;
1653
1654 bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ?
1655 13500000 : 6500000;
1656
1657 if (modulation < 4)
1658 bitrate *= (modulation + 1);
1659 else if (modulation == 4)
1660 bitrate *= (modulation + 2);
1661 else
1662 bitrate *= (modulation + 3);
1663
1664 bitrate *= streams;
1665
1666 if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
1667 bitrate = (bitrate / 9) * 10;
1668
1669 /* do NOT round down here */
1670 return (bitrate + 50000) / 100000;
1671}
1672
1673static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, 1664static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
1674 int flags, struct net_device *dev, 1665 int flags, struct net_device *dev,
1675 u8 *mac_addr, struct station_info *sinfo) 1666 const u8 *mac_addr, struct station_info *sinfo)
1676{ 1667{
1677 void *hdr; 1668 void *hdr;
1678 struct nlattr *sinfoattr, *txrate; 1669 struct nlattr *sinfoattr, *txrate;
@@ -1716,8 +1707,8 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
1716 if (!txrate) 1707 if (!txrate)
1717 goto nla_put_failure; 1708 goto nla_put_failure;
1718 1709
1719 /* nl80211_calculate_bitrate will return 0 for mcs >= 32 */ 1710 /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
1720 bitrate = nl80211_calculate_bitrate(&sinfo->txrate); 1711 bitrate = cfg80211_calculate_bitrate(&sinfo->txrate);
1721 if (bitrate > 0) 1712 if (bitrate > 0)
1722 NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate); 1713 NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
1723 1714
@@ -2023,6 +2014,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
2023 if (!info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]) 2014 if (!info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES])
2024 return -EINVAL; 2015 return -EINVAL;
2025 2016
2017 if (!info->attrs[NL80211_ATTR_STA_AID])
2018 return -EINVAL;
2019
2026 mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); 2020 mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
2027 params.supported_rates = 2021 params.supported_rates =
2028 nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); 2022 nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]);
@@ -2031,11 +2025,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
2031 params.listen_interval = 2025 params.listen_interval =
2032 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); 2026 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
2033 2027
2034 if (info->attrs[NL80211_ATTR_STA_AID]) { 2028 params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
2035 params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); 2029 if (!params.aid || params.aid > IEEE80211_MAX_AID)
2036 if (!params.aid || params.aid > IEEE80211_MAX_AID) 2030 return -EINVAL;
2037 return -EINVAL;
2038 }
2039 2031
2040 if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) 2032 if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
2041 params.ht_capa = 2033 params.ht_capa =
@@ -2050,6 +2042,12 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
2050 if (err) 2042 if (err)
2051 goto out_rtnl; 2043 goto out_rtnl;
2052 2044
2045 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
2046 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
2047 err = -EINVAL;
2048 goto out;
2049 }
2050
2053 err = get_vlan(info, rdev, &params.vlan); 2051 err = get_vlan(info, rdev, &params.vlan);
2054 if (err) 2052 if (err)
2055 goto out; 2053 goto out;
@@ -2057,35 +2055,6 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
2057 /* validate settings */ 2055 /* validate settings */
2058 err = 0; 2056 err = 0;
2059 2057
2060 switch (dev->ieee80211_ptr->iftype) {
2061 case NL80211_IFTYPE_AP:
2062 case NL80211_IFTYPE_AP_VLAN:
2063 /* all ok but must have AID */
2064 if (!params.aid)
2065 err = -EINVAL;
2066 break;
2067 case NL80211_IFTYPE_MESH_POINT:
2068 /* disallow things mesh doesn't support */
2069 if (params.vlan)
2070 err = -EINVAL;
2071 if (params.aid)
2072 err = -EINVAL;
2073 if (params.ht_capa)
2074 err = -EINVAL;
2075 if (params.listen_interval >= 0)
2076 err = -EINVAL;
2077 if (params.supported_rates)
2078 err = -EINVAL;
2079 if (params.sta_flags_mask)
2080 err = -EINVAL;
2081 break;
2082 default:
2083 err = -EINVAL;
2084 }
2085
2086 if (err)
2087 goto out;
2088
2089 if (!rdev->ops->add_station) { 2058 if (!rdev->ops->add_station) {
2090 err = -EOPNOTSUPP; 2059 err = -EOPNOTSUPP;
2091 goto out; 2060 goto out;
@@ -2126,8 +2095,7 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
2126 goto out_rtnl; 2095 goto out_rtnl;
2127 2096
2128 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && 2097 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
2129 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN && 2098 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
2130 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
2131 err = -EINVAL; 2099 err = -EINVAL;
2132 goto out; 2100 goto out;
2133 } 2101 }
@@ -2514,8 +2482,7 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
2514 return err; 2482 return err;
2515} 2483}
2516 2484
2517static const struct nla_policy 2485static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
2518 reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
2519 [NL80211_ATTR_REG_RULE_FLAGS] = { .type = NLA_U32 }, 2486 [NL80211_ATTR_REG_RULE_FLAGS] = { .type = NLA_U32 },
2520 [NL80211_ATTR_FREQ_RANGE_START] = { .type = NLA_U32 }, 2487 [NL80211_ATTR_FREQ_RANGE_START] = { .type = NLA_U32 },
2521 [NL80211_ATTR_FREQ_RANGE_END] = { .type = NLA_U32 }, 2488 [NL80211_ATTR_FREQ_RANGE_END] = { .type = NLA_U32 },
@@ -2583,12 +2550,6 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
2583 2550
2584 data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]); 2551 data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
2585 2552
2586#ifdef CONFIG_WIRELESS_OLD_REGULATORY
2587 /* We ignore world regdom requests with the old regdom setup */
2588 if (is_world_regdom(data))
2589 return -EINVAL;
2590#endif
2591
2592 r = regulatory_hint_user(data); 2553 r = regulatory_hint_user(data);
2593 2554
2594 return r; 2555 return r;
@@ -2690,8 +2651,7 @@ do {\
2690 } \ 2651 } \
2691} while (0);\ 2652} while (0);\
2692 2653
2693static struct nla_policy 2654static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] = {
2694nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] __read_mostly = {
2695 [NL80211_MESHCONF_RETRY_TIMEOUT] = { .type = NLA_U16 }, 2655 [NL80211_MESHCONF_RETRY_TIMEOUT] = { .type = NLA_U16 },
2696 [NL80211_MESHCONF_CONFIRM_TIMEOUT] = { .type = NLA_U16 }, 2656 [NL80211_MESHCONF_CONFIRM_TIMEOUT] = { .type = NLA_U16 },
2697 [NL80211_MESHCONF_HOLDING_TIMEOUT] = { .type = NLA_U16 }, 2657 [NL80211_MESHCONF_HOLDING_TIMEOUT] = { .type = NLA_U16 },
@@ -3182,6 +3142,10 @@ static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
3182 NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS, 3142 NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS,
3183 res->len_information_elements, 3143 res->len_information_elements,
3184 res->information_elements); 3144 res->information_elements);
3145 if (res->beacon_ies && res->len_beacon_ies &&
3146 res->beacon_ies != res->information_elements)
3147 NLA_PUT(msg, NL80211_BSS_BEACON_IES,
3148 res->len_beacon_ies, res->beacon_ies);
3185 if (res->tsf) 3149 if (res->tsf)
3186 NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf); 3150 NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf);
3187 if (res->beacon_interval) 3151 if (res->beacon_interval)
@@ -3586,6 +3550,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
3586{ 3550{
3587 struct cfg80211_registered_device *rdev; 3551 struct cfg80211_registered_device *rdev;
3588 struct net_device *dev; 3552 struct net_device *dev;
3553 struct wireless_dev *wdev;
3589 struct cfg80211_crypto_settings crypto; 3554 struct cfg80211_crypto_settings crypto;
3590 struct ieee80211_channel *chan, *fixedchan; 3555 struct ieee80211_channel *chan, *fixedchan;
3591 const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL; 3556 const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL;
@@ -3631,7 +3596,8 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
3631 } 3596 }
3632 3597
3633 mutex_lock(&rdev->devlist_mtx); 3598 mutex_lock(&rdev->devlist_mtx);
3634 fixedchan = rdev_fixed_channel(rdev, NULL); 3599 wdev = dev->ieee80211_ptr;
3600 fixedchan = rdev_fixed_channel(rdev, wdev);
3635 if (fixedchan && chan != fixedchan) { 3601 if (fixedchan && chan != fixedchan) {
3636 err = -EBUSY; 3602 err = -EBUSY;
3637 mutex_unlock(&rdev->devlist_mtx); 3603 mutex_unlock(&rdev->devlist_mtx);
@@ -4322,6 +4288,496 @@ static int nl80211_flush_pmksa(struct sk_buff *skb, struct genl_info *info)
4322 4288
4323} 4289}
4324 4290
4291static int nl80211_remain_on_channel(struct sk_buff *skb,
4292 struct genl_info *info)
4293{
4294 struct cfg80211_registered_device *rdev;
4295 struct net_device *dev;
4296 struct ieee80211_channel *chan;
4297 struct sk_buff *msg;
4298 void *hdr;
4299 u64 cookie;
4300 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
4301 u32 freq, duration;
4302 int err;
4303
4304 if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
4305 !info->attrs[NL80211_ATTR_DURATION])
4306 return -EINVAL;
4307
4308 duration = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]);
4309
4310 /*
4311 * We should be on that channel for at least one jiffie,
4312 * and more than 5 seconds seems excessive.
4313 */
4314 if (!duration || !msecs_to_jiffies(duration) || duration > 5000)
4315 return -EINVAL;
4316
4317 rtnl_lock();
4318
4319 err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
4320 if (err)
4321 goto unlock_rtnl;
4322
4323 if (!rdev->ops->remain_on_channel) {
4324 err = -EOPNOTSUPP;
4325 goto out;
4326 }
4327
4328 if (!netif_running(dev)) {
4329 err = -ENETDOWN;
4330 goto out;
4331 }
4332
4333 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
4334 channel_type = nla_get_u32(
4335 info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
4336 if (channel_type != NL80211_CHAN_NO_HT &&
4337 channel_type != NL80211_CHAN_HT20 &&
4338 channel_type != NL80211_CHAN_HT40PLUS &&
4339 channel_type != NL80211_CHAN_HT40MINUS)
4340 err = -EINVAL;
4341 goto out;
4342 }
4343
4344 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
4345 chan = rdev_freq_to_chan(rdev, freq, channel_type);
4346 if (chan == NULL) {
4347 err = -EINVAL;
4348 goto out;
4349 }
4350
4351 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
4352 if (!msg) {
4353 err = -ENOMEM;
4354 goto out;
4355 }
4356
4357 hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
4358 NL80211_CMD_REMAIN_ON_CHANNEL);
4359
4360 if (IS_ERR(hdr)) {
4361 err = PTR_ERR(hdr);
4362 goto free_msg;
4363 }
4364
4365 err = rdev->ops->remain_on_channel(&rdev->wiphy, dev, chan,
4366 channel_type, duration, &cookie);
4367
4368 if (err)
4369 goto free_msg;
4370
4371 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
4372
4373 genlmsg_end(msg, hdr);
4374 err = genlmsg_reply(msg, info);
4375 goto out;
4376
4377 nla_put_failure:
4378 err = -ENOBUFS;
4379 free_msg:
4380 nlmsg_free(msg);
4381 out:
4382 cfg80211_unlock_rdev(rdev);
4383 dev_put(dev);
4384 unlock_rtnl:
4385 rtnl_unlock();
4386 return err;
4387}
4388
4389static int nl80211_cancel_remain_on_channel(struct sk_buff *skb,
4390 struct genl_info *info)
4391{
4392 struct cfg80211_registered_device *rdev;
4393 struct net_device *dev;
4394 u64 cookie;
4395 int err;
4396
4397 if (!info->attrs[NL80211_ATTR_COOKIE])
4398 return -EINVAL;
4399
4400 rtnl_lock();
4401
4402 err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
4403 if (err)
4404 goto unlock_rtnl;
4405
4406 if (!rdev->ops->cancel_remain_on_channel) {
4407 err = -EOPNOTSUPP;
4408 goto out;
4409 }
4410
4411 if (!netif_running(dev)) {
4412 err = -ENETDOWN;
4413 goto out;
4414 }
4415
4416 cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);
4417
4418 err = rdev->ops->cancel_remain_on_channel(&rdev->wiphy, dev, cookie);
4419
4420 out:
4421 cfg80211_unlock_rdev(rdev);
4422 dev_put(dev);
4423 unlock_rtnl:
4424 rtnl_unlock();
4425 return err;
4426}
4427
4428static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
4429 u8 *rates, u8 rates_len)
4430{
4431 u8 i;
4432 u32 mask = 0;
4433
4434 for (i = 0; i < rates_len; i++) {
4435 int rate = (rates[i] & 0x7f) * 5;
4436 int ridx;
4437 for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
4438 struct ieee80211_rate *srate =
4439 &sband->bitrates[ridx];
4440 if (rate == srate->bitrate) {
4441 mask |= 1 << ridx;
4442 break;
4443 }
4444 }
4445 if (ridx == sband->n_bitrates)
4446 return 0; /* rate not found */
4447 }
4448
4449 return mask;
4450}
4451
4452static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
4453 [NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
4454 .len = NL80211_MAX_SUPP_RATES },
4455};
4456
/*
 * NL80211_CMD_SET_TX_BITRATE_MASK handler: build a per-band legacy TX
 * rate mask from the nested NL80211_ATTR_TX_RATES attribute and hand it
 * to the driver via rdev->ops->set_bitrate_mask().
 *
 * Bands not mentioned in the attribute keep all rates enabled; a band
 * mentioned with an unsupported rate set makes the whole request fail
 * with -EINVAL.
 */
static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
				       struct genl_info *info)
{
	struct nlattr *tb[NL80211_TXRATE_MAX + 1];
	struct cfg80211_registered_device *rdev;
	struct cfg80211_bitrate_mask mask;
	int err, rem, i;
	struct net_device *dev;
	struct nlattr *tx_rates;
	struct ieee80211_supported_band *sband;

	if (info->attrs[NL80211_ATTR_TX_RATES] == NULL)
		return -EINVAL;

	rtnl_lock();

	/* on success this locks the rdev and holds a ref on dev */
	err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
	if (err)
		goto unlock_rtnl;

	if (!rdev->ops->set_bitrate_mask) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	memset(&mask, 0, sizeof(mask));
	/* Default to all rates enabled */
	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
		sband = rdev->wiphy.bands[i];
		mask.control[i].legacy =
			sband ? (1 << sband->n_bitrates) - 1 : 0;
	}

	/*
	 * The nested attribute uses enum nl80211_band as the index. This maps
	 * directly to the enum ieee80211_band values used in cfg80211.
	 */
	nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem)
	{
		enum ieee80211_band band = nla_type(tx_rates);
		if (band < 0 || band >= IEEE80211_NUM_BANDS) {
			err = -EINVAL;
			goto unlock;
		}
		sband = rdev->wiphy.bands[band];
		if (sband == NULL) {
			err = -EINVAL;
			goto unlock;
		}
		/* NOTE(review): nla_parse() return value is ignored here;
		 * a malformed nested attribute is silently treated as empty
		 * — consider checking it */
		nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
			  nla_len(tx_rates), nl80211_txattr_policy);
		if (tb[NL80211_TXRATE_LEGACY]) {
			mask.control[band].legacy = rateset_to_mask(
				sband,
				nla_data(tb[NL80211_TXRATE_LEGACY]),
				nla_len(tb[NL80211_TXRATE_LEGACY]));
			/* rateset_to_mask() returns 0 for unknown rates */
			if (mask.control[band].legacy == 0) {
				err = -EINVAL;
				goto unlock;
			}
		}
	}

	err = rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, NULL, &mask);

 unlock:
	dev_put(dev);
	cfg80211_unlock_rdev(rdev);
 unlock_rtnl:
	rtnl_unlock();
	return err;
}
4529
4530static int nl80211_register_action(struct sk_buff *skb, struct genl_info *info)
4531{
4532 struct cfg80211_registered_device *rdev;
4533 struct net_device *dev;
4534 int err;
4535
4536 if (!info->attrs[NL80211_ATTR_FRAME_MATCH])
4537 return -EINVAL;
4538
4539 if (nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]) < 1)
4540 return -EINVAL;
4541
4542 rtnl_lock();
4543
4544 err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
4545 if (err)
4546 goto unlock_rtnl;
4547
4548 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
4549 err = -EOPNOTSUPP;
4550 goto out;
4551 }
4552
4553 /* not much point in registering if we can't reply */
4554 if (!rdev->ops->action) {
4555 err = -EOPNOTSUPP;
4556 goto out;
4557 }
4558
4559 err = cfg80211_mlme_register_action(dev->ieee80211_ptr, info->snd_pid,
4560 nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]),
4561 nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]));
4562 out:
4563 cfg80211_unlock_rdev(rdev);
4564 dev_put(dev);
4565 unlock_rtnl:
4566 rtnl_unlock();
4567 return err;
4568}
4569
4570static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
4571{
4572 struct cfg80211_registered_device *rdev;
4573 struct net_device *dev;
4574 struct ieee80211_channel *chan;
4575 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
4576 u32 freq;
4577 int err;
4578 void *hdr;
4579 u64 cookie;
4580 struct sk_buff *msg;
4581
4582 if (!info->attrs[NL80211_ATTR_FRAME] ||
4583 !info->attrs[NL80211_ATTR_WIPHY_FREQ])
4584 return -EINVAL;
4585
4586 rtnl_lock();
4587
4588 err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
4589 if (err)
4590 goto unlock_rtnl;
4591
4592 if (!rdev->ops->action) {
4593 err = -EOPNOTSUPP;
4594 goto out;
4595 }
4596
4597 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
4598 err = -EOPNOTSUPP;
4599 goto out;
4600 }
4601
4602 if (!netif_running(dev)) {
4603 err = -ENETDOWN;
4604 goto out;
4605 }
4606
4607 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
4608 channel_type = nla_get_u32(
4609 info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
4610 if (channel_type != NL80211_CHAN_NO_HT &&
4611 channel_type != NL80211_CHAN_HT20 &&
4612 channel_type != NL80211_CHAN_HT40PLUS &&
4613 channel_type != NL80211_CHAN_HT40MINUS)
4614 err = -EINVAL;
4615 goto out;
4616 }
4617
4618 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
4619 chan = rdev_freq_to_chan(rdev, freq, channel_type);
4620 if (chan == NULL) {
4621 err = -EINVAL;
4622 goto out;
4623 }
4624
4625 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
4626 if (!msg) {
4627 err = -ENOMEM;
4628 goto out;
4629 }
4630
4631 hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
4632 NL80211_CMD_ACTION);
4633
4634 if (IS_ERR(hdr)) {
4635 err = PTR_ERR(hdr);
4636 goto free_msg;
4637 }
4638 err = cfg80211_mlme_action(rdev, dev, chan, channel_type,
4639 nla_data(info->attrs[NL80211_ATTR_FRAME]),
4640 nla_len(info->attrs[NL80211_ATTR_FRAME]),
4641 &cookie);
4642 if (err)
4643 goto free_msg;
4644
4645 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
4646
4647 genlmsg_end(msg, hdr);
4648 err = genlmsg_reply(msg, info);
4649 goto out;
4650
4651 nla_put_failure:
4652 err = -ENOBUFS;
4653 free_msg:
4654 nlmsg_free(msg);
4655 out:
4656 cfg80211_unlock_rdev(rdev);
4657 dev_put(dev);
4658unlock_rtnl:
4659 rtnl_unlock();
4660 return err;
4661}
4662
4663static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info)
4664{
4665 struct cfg80211_registered_device *rdev;
4666 struct wireless_dev *wdev;
4667 struct net_device *dev;
4668 u8 ps_state;
4669 bool state;
4670 int err;
4671
4672 if (!info->attrs[NL80211_ATTR_PS_STATE]) {
4673 err = -EINVAL;
4674 goto out;
4675 }
4676
4677 ps_state = nla_get_u32(info->attrs[NL80211_ATTR_PS_STATE]);
4678
4679 if (ps_state != NL80211_PS_DISABLED && ps_state != NL80211_PS_ENABLED) {
4680 err = -EINVAL;
4681 goto out;
4682 }
4683
4684 rtnl_lock();
4685
4686 err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
4687 if (err)
4688 goto unlock_rdev;
4689
4690 wdev = dev->ieee80211_ptr;
4691
4692 if (!rdev->ops->set_power_mgmt) {
4693 err = -EOPNOTSUPP;
4694 goto unlock_rdev;
4695 }
4696
4697 state = (ps_state == NL80211_PS_ENABLED) ? true : false;
4698
4699 if (state == wdev->ps)
4700 goto unlock_rdev;
4701
4702 wdev->ps = state;
4703
4704 if (rdev->ops->set_power_mgmt(wdev->wiphy, dev, wdev->ps,
4705 wdev->ps_timeout))
4706 /* assume this means it's off */
4707 wdev->ps = false;
4708
4709unlock_rdev:
4710 cfg80211_unlock_rdev(rdev);
4711 dev_put(dev);
4712 rtnl_unlock();
4713
4714out:
4715 return err;
4716}
4717
/*
 * NL80211_CMD_GET_POWER_SAVE handler: report the current powersave
 * state (wdev->ps) to the requesting socket via a unicast reply.
 *
 * Note: statement order matters — the NLA_PUT_U32 macro contains a
 * hidden "goto nla_put_failure" on error.
 */
static int nl80211_get_power_save(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev;
	enum nl80211_ps_state ps_state;
	struct wireless_dev *wdev;
	struct net_device *dev;
	struct sk_buff *msg;
	void *hdr;
	int err;

	rtnl_lock();

	/* on success this locks the rdev and holds a ref on dev */
	err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
	if (err)
		goto unlock_rtnl;

	wdev = dev->ieee80211_ptr;

	/* if powersave can't be set, reporting it is meaningless too */
	if (!rdev->ops->set_power_mgmt) {
		err = -EOPNOTSUPP;
		goto out;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
			     NL80211_CMD_GET_POWER_SAVE);
	if (!hdr) {
		err = -ENOMEM;
		goto free_msg;
	}

	if (wdev->ps)
		ps_state = NL80211_PS_ENABLED;
	else
		ps_state = NL80211_PS_DISABLED;

	NLA_PUT_U32(msg, NL80211_ATTR_PS_STATE, ps_state);

	genlmsg_end(msg, hdr);
	err = genlmsg_reply(msg, info);
	goto out;

nla_put_failure:
	err = -ENOBUFS;

free_msg:
	nlmsg_free(msg);

out:
	cfg80211_unlock_rdev(rdev);
	dev_put(dev);

unlock_rtnl:
	rtnl_unlock();

	return err;
}
4780
4325static struct genl_ops nl80211_ops[] = { 4781static struct genl_ops nl80211_ops[] = {
4326 { 4782 {
4327 .cmd = NL80211_CMD_GET_WIPHY, 4783 .cmd = NL80211_CMD_GET_WIPHY,
@@ -4584,8 +5040,50 @@ static struct genl_ops nl80211_ops[] = {
4584 .policy = nl80211_policy, 5040 .policy = nl80211_policy,
4585 .flags = GENL_ADMIN_PERM, 5041 .flags = GENL_ADMIN_PERM,
4586 }, 5042 },
4587 5043 {
5044 .cmd = NL80211_CMD_REMAIN_ON_CHANNEL,
5045 .doit = nl80211_remain_on_channel,
5046 .policy = nl80211_policy,
5047 .flags = GENL_ADMIN_PERM,
5048 },
5049 {
5050 .cmd = NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
5051 .doit = nl80211_cancel_remain_on_channel,
5052 .policy = nl80211_policy,
5053 .flags = GENL_ADMIN_PERM,
5054 },
5055 {
5056 .cmd = NL80211_CMD_SET_TX_BITRATE_MASK,
5057 .doit = nl80211_set_tx_bitrate_mask,
5058 .policy = nl80211_policy,
5059 .flags = GENL_ADMIN_PERM,
5060 },
5061 {
5062 .cmd = NL80211_CMD_REGISTER_ACTION,
5063 .doit = nl80211_register_action,
5064 .policy = nl80211_policy,
5065 .flags = GENL_ADMIN_PERM,
5066 },
5067 {
5068 .cmd = NL80211_CMD_ACTION,
5069 .doit = nl80211_action,
5070 .policy = nl80211_policy,
5071 .flags = GENL_ADMIN_PERM,
5072 },
5073 {
5074 .cmd = NL80211_CMD_SET_POWER_SAVE,
5075 .doit = nl80211_set_power_save,
5076 .policy = nl80211_policy,
5077 .flags = GENL_ADMIN_PERM,
5078 },
5079 {
5080 .cmd = NL80211_CMD_GET_POWER_SAVE,
5081 .doit = nl80211_get_power_save,
5082 .policy = nl80211_policy,
5083 /* can be retrieved by unprivileged users */
5084 },
4588}; 5085};
5086
4589static struct genl_multicast_group nl80211_mlme_mcgrp = { 5087static struct genl_multicast_group nl80211_mlme_mcgrp = {
4590 .name = "mlme", 5088 .name = "mlme",
4591}; 5089};
@@ -5173,6 +5671,193 @@ nla_put_failure:
5173 nlmsg_free(msg); 5671 nlmsg_free(msg);
5174} 5672}
5175 5673
/*
 * Multicast a remain-on-channel event (@cmd is either
 * NL80211_CMD_REMAIN_ON_CHANNEL or NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL)
 * to the "mlme" group in the wiphy's network namespace.
 *
 * Best-effort: allocation or message-build failures are silently
 * dropped (void return). The NLA_PUT_* macros jump to nla_put_failure
 * on error, so their order and the label placement are load-bearing.
 */
static void nl80211_send_remain_on_chan_event(
	int cmd, struct cfg80211_registered_device *rdev,
	struct net_device *netdev, u64 cookie,
	struct ieee80211_channel *chan,
	enum nl80211_channel_type channel_type,
	unsigned int duration, gfp_t gfp)
{
	struct sk_buff *msg;
	void *hdr;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
	if (!msg)
		return;

	hdr = nl80211hdr_put(msg, 0, 0, 0, cmd);
	if (!hdr) {
		nlmsg_free(msg);
		return;
	}

	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq);
	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type);
	NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);

	/* duration is only meaningful when the RoC period starts */
	if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL)
		NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration);

	if (genlmsg_end(msg, hdr) < 0) {
		nlmsg_free(msg);
		return;
	}

	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
				nl80211_mlme_mcgrp.id, gfp);
	return;

 nla_put_failure:
	genlmsg_cancel(msg, hdr);
	nlmsg_free(msg);
}
5716
5717void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
5718 struct net_device *netdev, u64 cookie,
5719 struct ieee80211_channel *chan,
5720 enum nl80211_channel_type channel_type,
5721 unsigned int duration, gfp_t gfp)
5722{
5723 nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL,
5724 rdev, netdev, cookie, chan,
5725 channel_type, duration, gfp);
5726}
5727
5728void nl80211_send_remain_on_channel_cancel(
5729 struct cfg80211_registered_device *rdev, struct net_device *netdev,
5730 u64 cookie, struct ieee80211_channel *chan,
5731 enum nl80211_channel_type channel_type, gfp_t gfp)
5732{
5733 nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
5734 rdev, netdev, cookie, chan,
5735 channel_type, 0, gfp);
5736}
5737
5738void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
5739 struct net_device *dev, const u8 *mac_addr,
5740 struct station_info *sinfo, gfp_t gfp)
5741{
5742 struct sk_buff *msg;
5743
5744 msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
5745 if (!msg)
5746 return;
5747
5748 if (nl80211_send_station(msg, 0, 0, 0, dev, mac_addr, sinfo) < 0) {
5749 nlmsg_free(msg);
5750 return;
5751 }
5752
5753 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
5754 nl80211_mlme_mcgrp.id, gfp);
5755}
5756
/*
 * Unicast a received action frame to the netlink socket @nlpid that
 * registered a matching pattern (see nl80211_register_action).
 *
 * Returns 0 on success or a negative error. Unlike the event helpers
 * this reports failure, so callers can fall back if delivery fails.
 * NLA_PUT* macros jump to nla_put_failure on error; genlmsg_unicast()
 * consumes the skb in both the success and failure cases.
 */
int nl80211_send_action(struct cfg80211_registered_device *rdev,
			struct net_device *netdev, u32 nlpid,
			int freq, const u8 *buf, size_t len, gfp_t gfp)
{
	struct sk_buff *msg;
	void *hdr;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
	if (!msg)
		return -ENOMEM;

	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ACTION);
	if (!hdr) {
		nlmsg_free(msg);
		return -ENOMEM;
	}

	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
	NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);

	err = genlmsg_end(msg, hdr);
	if (err < 0) {
		nlmsg_free(msg);
		return err;
	}

	err = genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
	if (err < 0)
		return err;
	return 0;

 nla_put_failure:
	genlmsg_cancel(msg, hdr);
	nlmsg_free(msg);
	return -ENOBUFS;
}
5796
5797void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev,
5798 struct net_device *netdev, u64 cookie,
5799 const u8 *buf, size_t len, bool ack,
5800 gfp_t gfp)
5801{
5802 struct sk_buff *msg;
5803 void *hdr;
5804
5805 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
5806 if (!msg)
5807 return;
5808
5809 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ACTION_TX_STATUS);
5810 if (!hdr) {
5811 nlmsg_free(msg);
5812 return;
5813 }
5814
5815 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
5816 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
5817 NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
5818 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
5819 if (ack)
5820 NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);
5821
5822 if (genlmsg_end(msg, hdr) < 0) {
5823 nlmsg_free(msg);
5824 return;
5825 }
5826
5827 genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp);
5828 return;
5829
5830 nla_put_failure:
5831 genlmsg_cancel(msg, hdr);
5832 nlmsg_free(msg);
5833}
5834
/*
 * Netlink notifier callback: when a netlink socket is released
 * (NETLINK_URELEASE), drop any action-frame registrations that socket
 * held on any wireless device, so stale pids don't accumulate.
 *
 * Walks the rdev/wdev lists under RCU; always returns NOTIFY_DONE.
 */
static int nl80211_netlink_notify(struct notifier_block * nb,
				  unsigned long state,
				  void *_notify)
{
	struct netlink_notify *notify = _notify;
	struct cfg80211_registered_device *rdev;
	struct wireless_dev *wdev;

	/* only care about socket close, not bind/other events */
	if (state != NETLINK_URELEASE)
		return NOTIFY_DONE;

	rcu_read_lock();

	list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list)
		list_for_each_entry_rcu(wdev, &rdev->netdev_list, list)
			cfg80211_mlme_unregister_actions(wdev, notify->pid);

	rcu_read_unlock();

	return NOTIFY_DONE;
}
5856
5857static struct notifier_block nl80211_netlink_notifier = {
5858 .notifier_call = nl80211_netlink_notify,
5859};
5860
5176/* initialisation/exit functions */ 5861/* initialisation/exit functions */
5177 5862
5178int nl80211_init(void) 5863int nl80211_init(void)
@@ -5206,6 +5891,10 @@ int nl80211_init(void)
5206 goto err_out; 5891 goto err_out;
5207#endif 5892#endif
5208 5893
5894 err = netlink_register_notifier(&nl80211_netlink_notifier);
5895 if (err)
5896 goto err_out;
5897
5209 return 0; 5898 return 0;
5210 err_out: 5899 err_out:
5211 genl_unregister_family(&nl80211_fam); 5900 genl_unregister_family(&nl80211_fam);
@@ -5214,5 +5903,6 @@ int nl80211_init(void)
5214 5903
5215void nl80211_exit(void) 5904void nl80211_exit(void)
5216{ 5905{
5906 netlink_unregister_notifier(&nl80211_netlink_notifier);
5217 genl_unregister_family(&nl80211_fam); 5907 genl_unregister_family(&nl80211_fam);
5218} 5908}
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 44cc2a76a1b0..4ca511102c6c 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -59,4 +59,27 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
59 struct net_device *netdev, const u8 *bssid, 59 struct net_device *netdev, const u8 *bssid,
60 gfp_t gfp); 60 gfp_t gfp);
61 61
62void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
63 struct net_device *netdev,
64 u64 cookie,
65 struct ieee80211_channel *chan,
66 enum nl80211_channel_type channel_type,
67 unsigned int duration, gfp_t gfp);
68void nl80211_send_remain_on_channel_cancel(
69 struct cfg80211_registered_device *rdev, struct net_device *netdev,
70 u64 cookie, struct ieee80211_channel *chan,
71 enum nl80211_channel_type channel_type, gfp_t gfp);
72
73void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
74 struct net_device *dev, const u8 *mac_addr,
75 struct station_info *sinfo, gfp_t gfp);
76
77int nl80211_send_action(struct cfg80211_registered_device *rdev,
78 struct net_device *netdev, u32 nlpid, int freq,
79 const u8 *buf, size_t len, gfp_t gfp);
80void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev,
81 struct net_device *netdev, u64 cookie,
82 const u8 *buf, size_t len, bool ack,
83 gfp_t gfp);
84
62#endif /* __NET_WIRELESS_NL80211_H */ 85#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index f591871a7b4f..1332c445d1c7 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -2,6 +2,16 @@
2 * Radiotap parser 2 * Radiotap parser
3 * 3 *
4 * Copyright 2007 Andy Green <andy@warmcat.com> 4 * Copyright 2007 Andy Green <andy@warmcat.com>
5 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Alternatively, this software may be distributed under the terms of BSD
12 * license.
13 *
14 * See COPYING for more details.
5 */ 15 */
6 16
7#include <net/cfg80211.h> 17#include <net/cfg80211.h>
@@ -10,6 +20,35 @@
10 20
11/* function prototypes and related defs are in include/net/cfg80211.h */ 21/* function prototypes and related defs are in include/net/cfg80211.h */
12 22
23static const struct radiotap_align_size rtap_namespace_sizes[] = {
24 [IEEE80211_RADIOTAP_TSFT] = { .align = 8, .size = 8, },
25 [IEEE80211_RADIOTAP_FLAGS] = { .align = 1, .size = 1, },
26 [IEEE80211_RADIOTAP_RATE] = { .align = 1, .size = 1, },
27 [IEEE80211_RADIOTAP_CHANNEL] = { .align = 2, .size = 4, },
28 [IEEE80211_RADIOTAP_FHSS] = { .align = 2, .size = 2, },
29 [IEEE80211_RADIOTAP_DBM_ANTSIGNAL] = { .align = 1, .size = 1, },
30 [IEEE80211_RADIOTAP_DBM_ANTNOISE] = { .align = 1, .size = 1, },
31 [IEEE80211_RADIOTAP_LOCK_QUALITY] = { .align = 2, .size = 2, },
32 [IEEE80211_RADIOTAP_TX_ATTENUATION] = { .align = 2, .size = 2, },
33 [IEEE80211_RADIOTAP_DB_TX_ATTENUATION] = { .align = 2, .size = 2, },
34 [IEEE80211_RADIOTAP_DBM_TX_POWER] = { .align = 1, .size = 1, },
35 [IEEE80211_RADIOTAP_ANTENNA] = { .align = 1, .size = 1, },
36 [IEEE80211_RADIOTAP_DB_ANTSIGNAL] = { .align = 1, .size = 1, },
37 [IEEE80211_RADIOTAP_DB_ANTNOISE] = { .align = 1, .size = 1, },
38 [IEEE80211_RADIOTAP_RX_FLAGS] = { .align = 2, .size = 2, },
39 [IEEE80211_RADIOTAP_TX_FLAGS] = { .align = 2, .size = 2, },
40 [IEEE80211_RADIOTAP_RTS_RETRIES] = { .align = 1, .size = 1, },
41 [IEEE80211_RADIOTAP_DATA_RETRIES] = { .align = 1, .size = 1, },
42 /*
43 * add more here as they are defined in radiotap.h
44 */
45};
46
47static const struct ieee80211_radiotap_namespace radiotap_ns = {
48 .n_bits = sizeof(rtap_namespace_sizes) / sizeof(rtap_namespace_sizes[0]),
49 .align_size = rtap_namespace_sizes,
50};
51
13/** 52/**
14 * ieee80211_radiotap_iterator_init - radiotap parser iterator initialization 53 * ieee80211_radiotap_iterator_init - radiotap parser iterator initialization
15 * @iterator: radiotap_iterator to initialize 54 * @iterator: radiotap_iterator to initialize
@@ -50,9 +89,9 @@
50 */ 89 */
51 90
52int ieee80211_radiotap_iterator_init( 91int ieee80211_radiotap_iterator_init(
53 struct ieee80211_radiotap_iterator *iterator, 92 struct ieee80211_radiotap_iterator *iterator,
54 struct ieee80211_radiotap_header *radiotap_header, 93 struct ieee80211_radiotap_header *radiotap_header,
55 int max_length) 94 int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns)
56{ 95{
57 /* Linux only supports version 0 radiotap format */ 96 /* Linux only supports version 0 radiotap format */
58 if (radiotap_header->it_version) 97 if (radiotap_header->it_version)
@@ -62,19 +101,24 @@ int ieee80211_radiotap_iterator_init(
62 if (max_length < get_unaligned_le16(&radiotap_header->it_len)) 101 if (max_length < get_unaligned_le16(&radiotap_header->it_len))
63 return -EINVAL; 102 return -EINVAL;
64 103
65 iterator->rtheader = radiotap_header; 104 iterator->_rtheader = radiotap_header;
66 iterator->max_length = get_unaligned_le16(&radiotap_header->it_len); 105 iterator->_max_length = get_unaligned_le16(&radiotap_header->it_len);
67 iterator->arg_index = 0; 106 iterator->_arg_index = 0;
68 iterator->bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present); 107 iterator->_bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present);
69 iterator->arg = (u8 *)radiotap_header + sizeof(*radiotap_header); 108 iterator->_arg = (uint8_t *)radiotap_header + sizeof(*radiotap_header);
70 iterator->this_arg = NULL; 109 iterator->_reset_on_ext = 0;
110 iterator->_next_bitmap = &radiotap_header->it_present;
111 iterator->_next_bitmap++;
112 iterator->_vns = vns;
113 iterator->current_namespace = &radiotap_ns;
114 iterator->is_radiotap_ns = 1;
71 115
72 /* find payload start allowing for extended bitmap(s) */ 116 /* find payload start allowing for extended bitmap(s) */
73 117
74 if (unlikely(iterator->bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT))) { 118 if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) {
75 while (get_unaligned_le32(iterator->arg) & 119 while (get_unaligned_le32(iterator->_arg) &
76 (1 << IEEE80211_RADIOTAP_EXT)) { 120 (1 << IEEE80211_RADIOTAP_EXT)) {
77 iterator->arg += sizeof(u32); 121 iterator->_arg += sizeof(uint32_t);
78 122
79 /* 123 /*
80 * check for insanity where the present bitmaps 124 * check for insanity where the present bitmaps
@@ -82,12 +126,13 @@ int ieee80211_radiotap_iterator_init(
82 * stated radiotap header length 126 * stated radiotap header length
83 */ 127 */
84 128
85 if (((ulong)iterator->arg - 129 if ((unsigned long)iterator->_arg -
86 (ulong)iterator->rtheader) > iterator->max_length) 130 (unsigned long)iterator->_rtheader >
131 (unsigned long)iterator->_max_length)
87 return -EINVAL; 132 return -EINVAL;
88 } 133 }
89 134
90 iterator->arg += sizeof(u32); 135 iterator->_arg += sizeof(uint32_t);
91 136
92 /* 137 /*
93 * no need to check again for blowing past stated radiotap 138 * no need to check again for blowing past stated radiotap
@@ -96,12 +141,36 @@ int ieee80211_radiotap_iterator_init(
96 */ 141 */
97 } 142 }
98 143
144 iterator->this_arg = iterator->_arg;
145
99 /* we are all initialized happily */ 146 /* we are all initialized happily */
100 147
101 return 0; 148 return 0;
102} 149}
103EXPORT_SYMBOL(ieee80211_radiotap_iterator_init); 150EXPORT_SYMBOL(ieee80211_radiotap_iterator_init);
104 151
152static void find_ns(struct ieee80211_radiotap_iterator *iterator,
153 uint32_t oui, uint8_t subns)
154{
155 int i;
156
157 iterator->current_namespace = NULL;
158
159 if (!iterator->_vns)
160 return;
161
162 for (i = 0; i < iterator->_vns->n_ns; i++) {
163 if (iterator->_vns->ns[i].oui != oui)
164 continue;
165 if (iterator->_vns->ns[i].subns != subns)
166 continue;
167
168 iterator->current_namespace = &iterator->_vns->ns[i];
169 break;
170 }
171}
172
173
105 174
106/** 175/**
107 * ieee80211_radiotap_iterator_next - return next radiotap parser iterator arg 176 * ieee80211_radiotap_iterator_next - return next radiotap parser iterator arg
@@ -127,99 +196,80 @@ EXPORT_SYMBOL(ieee80211_radiotap_iterator_init);
127 */ 196 */
128 197
129int ieee80211_radiotap_iterator_next( 198int ieee80211_radiotap_iterator_next(
130 struct ieee80211_radiotap_iterator *iterator) 199 struct ieee80211_radiotap_iterator *iterator)
131{ 200{
132 201 while (1) {
133 /*
134 * small length lookup table for all radiotap types we heard of
135 * starting from b0 in the bitmap, so we can walk the payload
136 * area of the radiotap header
137 *
138 * There is a requirement to pad args, so that args
139 * of a given length must begin at a boundary of that length
140 * -- but note that compound args are allowed (eg, 2 x u16
141 * for IEEE80211_RADIOTAP_CHANNEL) so total arg length is not
142 * a reliable indicator of alignment requirement.
143 *
144 * upper nybble: content alignment for arg
145 * lower nybble: content length for arg
146 */
147
148 static const u8 rt_sizes[] = {
149 [IEEE80211_RADIOTAP_TSFT] = 0x88,
150 [IEEE80211_RADIOTAP_FLAGS] = 0x11,
151 [IEEE80211_RADIOTAP_RATE] = 0x11,
152 [IEEE80211_RADIOTAP_CHANNEL] = 0x24,
153 [IEEE80211_RADIOTAP_FHSS] = 0x22,
154 [IEEE80211_RADIOTAP_DBM_ANTSIGNAL] = 0x11,
155 [IEEE80211_RADIOTAP_DBM_ANTNOISE] = 0x11,
156 [IEEE80211_RADIOTAP_LOCK_QUALITY] = 0x22,
157 [IEEE80211_RADIOTAP_TX_ATTENUATION] = 0x22,
158 [IEEE80211_RADIOTAP_DB_TX_ATTENUATION] = 0x22,
159 [IEEE80211_RADIOTAP_DBM_TX_POWER] = 0x11,
160 [IEEE80211_RADIOTAP_ANTENNA] = 0x11,
161 [IEEE80211_RADIOTAP_DB_ANTSIGNAL] = 0x11,
162 [IEEE80211_RADIOTAP_DB_ANTNOISE] = 0x11,
163 [IEEE80211_RADIOTAP_RX_FLAGS] = 0x22,
164 [IEEE80211_RADIOTAP_TX_FLAGS] = 0x22,
165 [IEEE80211_RADIOTAP_RTS_RETRIES] = 0x11,
166 [IEEE80211_RADIOTAP_DATA_RETRIES] = 0x11,
167 /*
168 * add more here as they are defined in
169 * include/net/ieee80211_radiotap.h
170 */
171 };
172
173 /*
174 * for every radiotap entry we can at
175 * least skip (by knowing the length)...
176 */
177
178 while (iterator->arg_index < sizeof(rt_sizes)) {
179 int hit = 0; 202 int hit = 0;
180 int pad; 203 int pad, align, size, subns, vnslen;
204 uint32_t oui;
181 205
182 if (!(iterator->bitmap_shifter & 1)) 206 /* if no more EXT bits, that's it */
207 if ((iterator->_arg_index % 32) == IEEE80211_RADIOTAP_EXT &&
208 !(iterator->_bitmap_shifter & 1))
209 return -ENOENT;
210
211 if (!(iterator->_bitmap_shifter & 1))
183 goto next_entry; /* arg not present */ 212 goto next_entry; /* arg not present */
184 213
214 /* get alignment/size of data */
215 switch (iterator->_arg_index % 32) {
216 case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE:
217 case IEEE80211_RADIOTAP_EXT:
218 align = 1;
219 size = 0;
220 break;
221 case IEEE80211_RADIOTAP_VENDOR_NAMESPACE:
222 align = 2;
223 size = 6;
224 break;
225 default:
226 if (!iterator->current_namespace ||
227 iterator->_arg_index >= iterator->current_namespace->n_bits) {
228 if (iterator->current_namespace == &radiotap_ns)
229 return -ENOENT;
230 align = 0;
231 } else {
232 align = iterator->current_namespace->align_size[iterator->_arg_index].align;
233 size = iterator->current_namespace->align_size[iterator->_arg_index].size;
234 }
235 if (!align) {
236 /* skip all subsequent data */
237 iterator->_arg = iterator->_next_ns_data;
238 /* give up on this namespace */
239 iterator->current_namespace = NULL;
240 goto next_entry;
241 }
242 break;
243 }
244
185 /* 245 /*
186 * arg is present, account for alignment padding 246 * arg is present, account for alignment padding
187 * 8-bit args can be at any alignment
188 * 16-bit args must start on 16-bit boundary
189 * 32-bit args must start on 32-bit boundary
190 * 64-bit args must start on 64-bit boundary
191 * 247 *
192 * note that total arg size can differ from alignment of 248 * Note that these alignments are relative to the start
193 * elements inside arg, so we use upper nybble of length 249 * of the radiotap header. There is no guarantee
194 * table to base alignment on
195 *
196 * also note: these alignments are ** relative to the
197 * start of the radiotap header **. There is no guarantee
198 * that the radiotap header itself is aligned on any 250 * that the radiotap header itself is aligned on any
199 * kind of boundary. 251 * kind of boundary.
200 * 252 *
201 * the above is why get_unaligned() is used to dereference 253 * The above is why get_unaligned() is used to dereference
202 * multibyte elements from the radiotap area 254 * multibyte elements from the radiotap area.
203 */ 255 */
204 256
205 pad = (((ulong)iterator->arg) - 257 pad = ((unsigned long)iterator->_arg -
206 ((ulong)iterator->rtheader)) & 258 (unsigned long)iterator->_rtheader) & (align - 1);
207 ((rt_sizes[iterator->arg_index] >> 4) - 1);
208 259
209 if (pad) 260 if (pad)
210 iterator->arg += 261 iterator->_arg += align - pad;
211 (rt_sizes[iterator->arg_index] >> 4) - pad;
212 262
213 /* 263 /*
214 * this is what we will return to user, but we need to 264 * this is what we will return to user, but we need to
215 * move on first so next call has something fresh to test 265 * move on first so next call has something fresh to test
216 */ 266 */
217 iterator->this_arg_index = iterator->arg_index; 267 iterator->this_arg_index = iterator->_arg_index;
218 iterator->this_arg = iterator->arg; 268 iterator->this_arg = iterator->_arg;
219 hit = 1; 269 iterator->this_arg_size = size;
220 270
221 /* internally move on the size of this arg */ 271 /* internally move on the size of this arg */
222 iterator->arg += rt_sizes[iterator->arg_index] & 0x0f; 272 iterator->_arg += size;
223 273
224 /* 274 /*
225 * check for insanity where we are given a bitmap that 275 * check for insanity where we are given a bitmap that
@@ -228,32 +278,73 @@ int ieee80211_radiotap_iterator_next(
228 * max_length on the last arg, never exceeding it. 278 * max_length on the last arg, never exceeding it.
229 */ 279 */
230 280
231 if (((ulong)iterator->arg - (ulong)iterator->rtheader) > 281 if ((unsigned long)iterator->_arg -
232 iterator->max_length) 282 (unsigned long)iterator->_rtheader >
283 (unsigned long)iterator->_max_length)
233 return -EINVAL; 284 return -EINVAL;
234 285
235 next_entry: 286 /* these special ones are valid in each bitmap word */
236 iterator->arg_index++; 287 switch (iterator->_arg_index % 32) {
237 if (unlikely((iterator->arg_index & 31) == 0)) { 288 case IEEE80211_RADIOTAP_VENDOR_NAMESPACE:
238 /* completed current u32 bitmap */ 289 iterator->_bitmap_shifter >>= 1;
239 if (iterator->bitmap_shifter & 1) { 290 iterator->_arg_index++;
240 /* b31 was set, there is more */ 291
241 /* move to next u32 bitmap */ 292 iterator->_reset_on_ext = 1;
242 iterator->bitmap_shifter = 293
243 get_unaligned_le32(iterator->next_bitmap); 294 vnslen = get_unaligned_le16(iterator->this_arg + 4);
244 iterator->next_bitmap++; 295 iterator->_next_ns_data = iterator->_arg + vnslen;
245 } else 296 oui = (*iterator->this_arg << 16) |
246 /* no more bitmaps: end */ 297 (*(iterator->this_arg + 1) << 8) |
247 iterator->arg_index = sizeof(rt_sizes); 298 *(iterator->this_arg + 2);
248 } else /* just try the next bit */ 299 subns = *(iterator->this_arg + 3);
249 iterator->bitmap_shifter >>= 1; 300
301 find_ns(iterator, oui, subns);
302
303 iterator->is_radiotap_ns = 0;
304 /* allow parsers to show this information */
305 iterator->this_arg_index =
306 IEEE80211_RADIOTAP_VENDOR_NAMESPACE;
307 iterator->this_arg_size += vnslen;
308 if ((unsigned long)iterator->this_arg +
309 iterator->this_arg_size -
310 (unsigned long)iterator->_rtheader >
311 (unsigned long)(unsigned long)iterator->_max_length)
312 return -EINVAL;
313 hit = 1;
314 break;
315 case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE:
316 iterator->_bitmap_shifter >>= 1;
317 iterator->_arg_index++;
318
319 iterator->_reset_on_ext = 1;
320 iterator->current_namespace = &radiotap_ns;
321 iterator->is_radiotap_ns = 1;
322 break;
323 case IEEE80211_RADIOTAP_EXT:
324 /*
325 * bit 31 was set, there is more
326 * -- move to next u32 bitmap
327 */
328 iterator->_bitmap_shifter =
329 get_unaligned_le32(iterator->_next_bitmap);
330 iterator->_next_bitmap++;
331 if (iterator->_reset_on_ext)
332 iterator->_arg_index = 0;
333 else
334 iterator->_arg_index++;
335 iterator->_reset_on_ext = 0;
336 break;
337 default:
338 /* we've got a hit! */
339 hit = 1;
340 next_entry:
341 iterator->_bitmap_shifter >>= 1;
342 iterator->_arg_index++;
343 }
250 344
251 /* if we found a valid arg earlier, return it now */ 345 /* if we found a valid arg earlier, return it now */
252 if (hit) 346 if (hit)
253 return 0; 347 return 0;
254 } 348 }
255
256 /* we don't know how to handle any more args, we're done */
257 return -ENOENT;
258} 349}
259EXPORT_SYMBOL(ieee80211_radiotap_iterator_next); 350EXPORT_SYMBOL(ieee80211_radiotap_iterator_next);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index baa898add287..ed89c59bb431 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -40,8 +40,18 @@
40#include <net/cfg80211.h> 40#include <net/cfg80211.h>
41#include "core.h" 41#include "core.h"
42#include "reg.h" 42#include "reg.h"
43#include "regdb.h"
43#include "nl80211.h" 44#include "nl80211.h"
44 45
46#ifdef CONFIG_CFG80211_REG_DEBUG
47#define REG_DBG_PRINT(format, args...) \
48 do { \
49 printk(KERN_DEBUG format , ## args); \
50 } while (0)
51#else
52#define REG_DBG_PRINT(args...)
53#endif
54
45/* Receipt of information from last regulatory request */ 55/* Receipt of information from last regulatory request */
46static struct regulatory_request *last_request; 56static struct regulatory_request *last_request;
47 57
@@ -124,82 +134,11 @@ static const struct ieee80211_regdomain *cfg80211_world_regdom =
124 &world_regdom; 134 &world_regdom;
125 135
126static char *ieee80211_regdom = "00"; 136static char *ieee80211_regdom = "00";
137static char user_alpha2[2];
127 138
128module_param(ieee80211_regdom, charp, 0444); 139module_param(ieee80211_regdom, charp, 0444);
129MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); 140MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
130 141
131#ifdef CONFIG_WIRELESS_OLD_REGULATORY
132/*
133 * We assume 40 MHz bandwidth for the old regulatory work.
134 * We make emphasis we are using the exact same frequencies
135 * as before
136 */
137
138static const struct ieee80211_regdomain us_regdom = {
139 .n_reg_rules = 6,
140 .alpha2 = "US",
141 .reg_rules = {
142 /* IEEE 802.11b/g, channels 1..11 */
143 REG_RULE(2412-10, 2462+10, 40, 6, 27, 0),
144 /* IEEE 802.11a, channel 36..48 */
145 REG_RULE(5180-10, 5240+10, 40, 6, 17, 0),
146 /* IEEE 802.11a, channels 48..64 */
147 REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS),
148 /* IEEE 802.11a, channels 100..124 */
149 REG_RULE(5500-10, 5590+10, 40, 6, 20, NL80211_RRF_DFS),
150 /* IEEE 802.11a, channels 132..144 */
151 REG_RULE(5660-10, 5700+10, 40, 6, 20, NL80211_RRF_DFS),
152 /* IEEE 802.11a, channels 149..165, outdoor */
153 REG_RULE(5745-10, 5825+10, 40, 6, 30, 0),
154 }
155};
156
157static const struct ieee80211_regdomain jp_regdom = {
158 .n_reg_rules = 6,
159 .alpha2 = "JP",
160 .reg_rules = {
161 /* IEEE 802.11b/g, channels 1..11 */
162 REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
163 /* IEEE 802.11b/g, channels 12..13 */
164 REG_RULE(2467-10, 2472+10, 20, 6, 20, 0),
165 /* IEEE 802.11b/g, channel 14 */
166 REG_RULE(2484-10, 2484+10, 20, 6, 20, NL80211_RRF_NO_OFDM),
167 /* IEEE 802.11a, channels 36..48 */
168 REG_RULE(5180-10, 5240+10, 40, 6, 20, 0),
169 /* IEEE 802.11a, channels 52..64 */
170 REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS),
171 /* IEEE 802.11a, channels 100..144 */
172 REG_RULE(5500-10, 5700+10, 40, 6, 23, NL80211_RRF_DFS),
173 }
174};
175
176static const struct ieee80211_regdomain *static_regdom(char *alpha2)
177{
178 if (alpha2[0] == 'U' && alpha2[1] == 'S')
179 return &us_regdom;
180 if (alpha2[0] == 'J' && alpha2[1] == 'P')
181 return &jp_regdom;
182 /* Use world roaming rules for "EU", since it was a pseudo
183 domain anyway... */
184 if (alpha2[0] == 'E' && alpha2[1] == 'U')
185 return &world_regdom;
186 /* Default, world roaming rules */
187 return &world_regdom;
188}
189
190static bool is_old_static_regdom(const struct ieee80211_regdomain *rd)
191{
192 if (rd == &us_regdom || rd == &jp_regdom || rd == &world_regdom)
193 return true;
194 return false;
195}
196#else
197static inline bool is_old_static_regdom(const struct ieee80211_regdomain *rd)
198{
199 return false;
200}
201#endif
202
203static void reset_regdomains(void) 142static void reset_regdomains(void)
204{ 143{
205 /* avoid freeing static information or freeing something twice */ 144 /* avoid freeing static information or freeing something twice */
@@ -209,8 +148,6 @@ static void reset_regdomains(void)
209 cfg80211_world_regdom = NULL; 148 cfg80211_world_regdom = NULL;
210 if (cfg80211_regdomain == &world_regdom) 149 if (cfg80211_regdomain == &world_regdom)
211 cfg80211_regdomain = NULL; 150 cfg80211_regdomain = NULL;
212 if (is_old_static_regdom(cfg80211_regdomain))
213 cfg80211_regdomain = NULL;
214 151
215 kfree(cfg80211_regdomain); 152 kfree(cfg80211_regdomain);
216 kfree(cfg80211_world_regdom); 153 kfree(cfg80211_world_regdom);
@@ -316,6 +253,27 @@ static bool regdom_changes(const char *alpha2)
316 return true; 253 return true;
317} 254}
318 255
256/*
257 * The NL80211_REGDOM_SET_BY_USER regdom alpha2 is cached, this lets
258 * you know if a valid regulatory hint with NL80211_REGDOM_SET_BY_USER
259 * has ever been issued.
260 */
261static bool is_user_regdom_saved(void)
262{
263 if (user_alpha2[0] == '9' && user_alpha2[1] == '7')
264 return false;
265
266 /* This would indicate a mistake on the design */
267 if (WARN((!is_world_regdom(user_alpha2) &&
268 !is_an_alpha2(user_alpha2)),
269 "Unexpected user alpha2: %c%c\n",
270 user_alpha2[0],
271 user_alpha2[1]))
272 return false;
273
274 return true;
275}
276
319/** 277/**
320 * country_ie_integrity_changes - tells us if the country IE has changed 278 * country_ie_integrity_changes - tells us if the country IE has changed
321 * @checksum: checksum of country IE of fields we are interested in 279 * @checksum: checksum of country IE of fields we are interested in
@@ -335,6 +293,98 @@ static bool country_ie_integrity_changes(u32 checksum)
335 return false; 293 return false;
336} 294}
337 295
296static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd,
297 const struct ieee80211_regdomain *src_regd)
298{
299 struct ieee80211_regdomain *regd;
300 int size_of_regd = 0;
301 unsigned int i;
302
303 size_of_regd = sizeof(struct ieee80211_regdomain) +
304 ((src_regd->n_reg_rules + 1) * sizeof(struct ieee80211_reg_rule));
305
306 regd = kzalloc(size_of_regd, GFP_KERNEL);
307 if (!regd)
308 return -ENOMEM;
309
310 memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
311
312 for (i = 0; i < src_regd->n_reg_rules; i++)
313 memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
314 sizeof(struct ieee80211_reg_rule));
315
316 *dst_regd = regd;
317 return 0;
318}
319
320#ifdef CONFIG_CFG80211_INTERNAL_REGDB
321struct reg_regdb_search_request {
322 char alpha2[2];
323 struct list_head list;
324};
325
326static LIST_HEAD(reg_regdb_search_list);
327static DEFINE_SPINLOCK(reg_regdb_search_lock);
328
329static void reg_regdb_search(struct work_struct *work)
330{
331 struct reg_regdb_search_request *request;
332 const struct ieee80211_regdomain *curdom, *regdom;
333 int i, r;
334
335 spin_lock(&reg_regdb_search_lock);
336 while (!list_empty(&reg_regdb_search_list)) {
337 request = list_first_entry(&reg_regdb_search_list,
338 struct reg_regdb_search_request,
339 list);
340 list_del(&request->list);
341
342 for (i=0; i<reg_regdb_size; i++) {
343 curdom = reg_regdb[i];
344
345 if (!memcmp(request->alpha2, curdom->alpha2, 2)) {
346 r = reg_copy_regd(&regdom, curdom);
347 if (r)
348 break;
349 spin_unlock(&reg_regdb_search_lock);
350 mutex_lock(&cfg80211_mutex);
351 set_regdom(regdom);
352 mutex_unlock(&cfg80211_mutex);
353 spin_lock(&reg_regdb_search_lock);
354 break;
355 }
356 }
357
358 kfree(request);
359 }
360 spin_unlock(&reg_regdb_search_lock);
361}
362
363static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
364
365static void reg_regdb_query(const char *alpha2)
366{
367 struct reg_regdb_search_request *request;
368
369 if (!alpha2)
370 return;
371
372 request = kzalloc(sizeof(struct reg_regdb_search_request), GFP_KERNEL);
373 if (!request)
374 return;
375
376 memcpy(request->alpha2, alpha2, 2);
377
378 spin_lock(&reg_regdb_search_lock);
379 list_add_tail(&request->list, &reg_regdb_search_list);
380 spin_unlock(&reg_regdb_search_lock);
381
382 schedule_work(&reg_regdb_work);
383}
384#else
385static inline void reg_regdb_query(const char *alpha2) {}
386#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
387
338/* 388/*
339 * This lets us keep regulatory code which is updated on a regulatory 389 * This lets us keep regulatory code which is updated on a regulatory
340 * basis in userspace. 390 * basis in userspace.
@@ -354,6 +404,9 @@ static int call_crda(const char *alpha2)
354 printk(KERN_INFO "cfg80211: Calling CRDA to update world " 404 printk(KERN_INFO "cfg80211: Calling CRDA to update world "
355 "regulatory domain\n"); 405 "regulatory domain\n");
356 406
407 /* query internal regulatory database (if it exists) */
408 reg_regdb_query(alpha2);
409
357 country_env[8] = alpha2[0]; 410 country_env[8] = alpha2[0];
358 country_env[9] = alpha2[1]; 411 country_env[9] = alpha2[1];
359 412
@@ -454,12 +507,212 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
454} 507}
455 508
456/* 509/*
510 * This is a work around for sanity checking ieee80211_channel_to_frequency()'s
511 * work. ieee80211_channel_to_frequency() can for example currently provide a
512 * 2 GHz channel when in fact a 5 GHz channel was desired. An example would be
513 * an AP providing channel 8 on a country IE triplet when it sent this on the
514 * 5 GHz band, that channel is designed to be channel 8 on 5 GHz, not a 2 GHz
515 * channel.
516 *
517 * This can be removed once ieee80211_channel_to_frequency() takes in a band.
518 */
519static bool chan_in_band(int chan, enum ieee80211_band band)
520{
521 int center_freq = ieee80211_channel_to_frequency(chan);
522
523 switch (band) {
524 case IEEE80211_BAND_2GHZ:
525 if (center_freq <= 2484)
526 return true;
527 return false;
528 case IEEE80211_BAND_5GHZ:
529 if (center_freq >= 5005)
530 return true;
531 return false;
532 default:
533 return false;
534 }
535}
536
537/*
538 * Some APs may send a country IE triplet for each channel they
539 * support and while this is completely overkill and silly we still
540 * need to support it. We avoid making a single rule for each channel
541 * though and to help us with this we use this helper to find the
542 * actual subband end channel. These type of country IE triplet
543 * scenerios are handled then, all yielding two regulaotry rules from
544 * parsing a country IE:
545 *
546 * [1]
547 * [2]
548 * [36]
549 * [40]
550 *
551 * [1]
552 * [2-4]
553 * [5-12]
554 * [36]
555 * [40-44]
556 *
557 * [1-4]
558 * [5-7]
559 * [36-44]
560 * [48-64]
561 *
562 * [36-36]
563 * [40-40]
564 * [44-44]
565 * [48-48]
566 * [52-52]
567 * [56-56]
568 * [60-60]
569 * [64-64]
570 * [100-100]
571 * [104-104]
572 * [108-108]
573 * [112-112]
574 * [116-116]
575 * [120-120]
576 * [124-124]
577 * [128-128]
578 * [132-132]
579 * [136-136]
580 * [140-140]
581 *
582 * Returns 0 if the IE has been found to be invalid in the middle
583 * somewhere.
584 */
585static int max_subband_chan(enum ieee80211_band band,
586 int orig_cur_chan,
587 int orig_end_channel,
588 s8 orig_max_power,
589 u8 **country_ie,
590 u8 *country_ie_len)
591{
592 u8 *triplets_start = *country_ie;
593 u8 len_at_triplet = *country_ie_len;
594 int end_subband_chan = orig_end_channel;
595
596 /*
597 * We'll deal with padding for the caller unless
598 * its not immediate and we don't process any channels
599 */
600 if (*country_ie_len == 1) {
601 *country_ie += 1;
602 *country_ie_len -= 1;
603 return orig_end_channel;
604 }
605
606 /* Move to the next triplet and then start search */
607 *country_ie += 3;
608 *country_ie_len -= 3;
609
610 if (!chan_in_band(orig_cur_chan, band))
611 return 0;
612
613 while (*country_ie_len >= 3) {
614 int end_channel = 0;
615 struct ieee80211_country_ie_triplet *triplet =
616 (struct ieee80211_country_ie_triplet *) *country_ie;
617 int cur_channel = 0, next_expected_chan;
618
619 /* means last triplet is completely unrelated to this one */
620 if (triplet->ext.reg_extension_id >=
621 IEEE80211_COUNTRY_EXTENSION_ID) {
622 *country_ie -= 3;
623 *country_ie_len += 3;
624 break;
625 }
626
627 if (triplet->chans.first_channel == 0) {
628 *country_ie += 1;
629 *country_ie_len -= 1;
630 if (*country_ie_len != 0)
631 return 0;
632 break;
633 }
634
635 if (triplet->chans.num_channels == 0)
636 return 0;
637
638 /* Monitonically increasing channel order */
639 if (triplet->chans.first_channel <= end_subband_chan)
640 return 0;
641
642 if (!chan_in_band(triplet->chans.first_channel, band))
643 return 0;
644
645 /* 2 GHz */
646 if (triplet->chans.first_channel <= 14) {
647 end_channel = triplet->chans.first_channel +
648 triplet->chans.num_channels - 1;
649 }
650 else {
651 end_channel = triplet->chans.first_channel +
652 (4 * (triplet->chans.num_channels - 1));
653 }
654
655 if (!chan_in_band(end_channel, band))
656 return 0;
657
658 if (orig_max_power != triplet->chans.max_power) {
659 *country_ie -= 3;
660 *country_ie_len += 3;
661 break;
662 }
663
664 cur_channel = triplet->chans.first_channel;
665
666 /* The key is finding the right next expected channel */
667 if (band == IEEE80211_BAND_2GHZ)
668 next_expected_chan = end_subband_chan + 1;
669 else
670 next_expected_chan = end_subband_chan + 4;
671
672 if (cur_channel != next_expected_chan) {
673 *country_ie -= 3;
674 *country_ie_len += 3;
675 break;
676 }
677
678 end_subband_chan = end_channel;
679
680 /* Move to the next one */
681 *country_ie += 3;
682 *country_ie_len -= 3;
683
684 /*
685 * Padding needs to be dealt with if we processed
686 * some channels.
687 */
688 if (*country_ie_len == 1) {
689 *country_ie += 1;
690 *country_ie_len -= 1;
691 break;
692 }
693
694 /* If seen, the IE is invalid */
695 if (*country_ie_len == 2)
696 return 0;
697 }
698
699 if (end_subband_chan == orig_end_channel) {
700 *country_ie = triplets_start;
701 *country_ie_len = len_at_triplet;
702 return orig_end_channel;
703 }
704
705 return end_subband_chan;
706}
707
708/*
457 * Converts a country IE to a regulatory domain. A regulatory domain 709 * Converts a country IE to a regulatory domain. A regulatory domain
458 * structure has a lot of information which the IE doesn't yet have, 710 * structure has a lot of information which the IE doesn't yet have,
459 * so for the other values we use upper max values as we will intersect 711 * so for the other values we use upper max values as we will intersect
460 * with our userspace regulatory agent to get lower bounds. 712 * with our userspace regulatory agent to get lower bounds.
461 */ 713 */
462static struct ieee80211_regdomain *country_ie_2_rd( 714static struct ieee80211_regdomain *country_ie_2_rd(
715 enum ieee80211_band band,
463 u8 *country_ie, 716 u8 *country_ie,
464 u8 country_ie_len, 717 u8 country_ie_len,
465 u32 *checksum) 718 u32 *checksum)
@@ -521,10 +774,29 @@ static struct ieee80211_regdomain *country_ie_2_rd(
521 continue; 774 continue;
522 } 775 }
523 776
777 /*
778 * APs can add padding to make length divisible
779 * by two, required by the spec.
780 */
781 if (triplet->chans.first_channel == 0) {
782 country_ie++;
783 country_ie_len--;
784 /* This is expected to be at the very end only */
785 if (country_ie_len != 0)
786 return NULL;
787 break;
788 }
789
790 if (triplet->chans.num_channels == 0)
791 return NULL;
792
793 if (!chan_in_band(triplet->chans.first_channel, band))
794 return NULL;
795
524 /* 2 GHz */ 796 /* 2 GHz */
525 if (triplet->chans.first_channel <= 14) 797 if (band == IEEE80211_BAND_2GHZ)
526 end_channel = triplet->chans.first_channel + 798 end_channel = triplet->chans.first_channel +
527 triplet->chans.num_channels; 799 triplet->chans.num_channels - 1;
528 else 800 else
529 /* 801 /*
530 * 5 GHz -- For example in country IEs if the first 802 * 5 GHz -- For example in country IEs if the first
@@ -539,6 +811,24 @@ static struct ieee80211_regdomain *country_ie_2_rd(
539 (4 * (triplet->chans.num_channels - 1)); 811 (4 * (triplet->chans.num_channels - 1));
540 812
541 cur_channel = triplet->chans.first_channel; 813 cur_channel = triplet->chans.first_channel;
814
815 /*
816 * Enhancement for APs that send a triplet for every channel
817 * or for whatever reason sends triplets with multiple channels
818 * separated when in fact they should be together.
819 */
820 end_channel = max_subband_chan(band,
821 cur_channel,
822 end_channel,
823 triplet->chans.max_power,
824 &country_ie,
825 &country_ie_len);
826 if (!end_channel)
827 return NULL;
828
829 if (!chan_in_band(end_channel, band))
830 return NULL;
831
542 cur_sub_max_channel = end_channel; 832 cur_sub_max_channel = end_channel;
543 833
544 /* Basic sanity check */ 834 /* Basic sanity check */
@@ -569,10 +859,13 @@ static struct ieee80211_regdomain *country_ie_2_rd(
569 859
570 last_sub_max_channel = cur_sub_max_channel; 860 last_sub_max_channel = cur_sub_max_channel;
571 861
572 country_ie += 3;
573 country_ie_len -= 3;
574 num_rules++; 862 num_rules++;
575 863
864 if (country_ie_len >= 3) {
865 country_ie += 3;
866 country_ie_len -= 3;
867 }
868
576 /* 869 /*
577 * Note: this is not a IEEE requirement but 870 * Note: this is not a IEEE requirement but
578 * simply a memory requirement 871 * simply a memory requirement
@@ -615,6 +908,12 @@ static struct ieee80211_regdomain *country_ie_2_rd(
615 continue; 908 continue;
616 } 909 }
617 910
911 if (triplet->chans.first_channel == 0) {
912 country_ie++;
913 country_ie_len--;
914 break;
915 }
916
618 reg_rule = &rd->reg_rules[i]; 917 reg_rule = &rd->reg_rules[i];
619 freq_range = &reg_rule->freq_range; 918 freq_range = &reg_rule->freq_range;
620 power_rule = &reg_rule->power_rule; 919 power_rule = &reg_rule->power_rule;
@@ -622,13 +921,20 @@ static struct ieee80211_regdomain *country_ie_2_rd(
622 reg_rule->flags = flags; 921 reg_rule->flags = flags;
623 922
624 /* 2 GHz */ 923 /* 2 GHz */
625 if (triplet->chans.first_channel <= 14) 924 if (band == IEEE80211_BAND_2GHZ)
626 end_channel = triplet->chans.first_channel + 925 end_channel = triplet->chans.first_channel +
627 triplet->chans.num_channels; 926 triplet->chans.num_channels -1;
628 else 927 else
629 end_channel = triplet->chans.first_channel + 928 end_channel = triplet->chans.first_channel +
630 (4 * (triplet->chans.num_channels - 1)); 929 (4 * (triplet->chans.num_channels - 1));
631 930
931 end_channel = max_subband_chan(band,
932 triplet->chans.first_channel,
933 end_channel,
934 triplet->chans.max_power,
935 &country_ie,
936 &country_ie_len);
937
632 /* 938 /*
633 * The +10 is since the regulatory domain expects 939 * The +10 is since the regulatory domain expects
634 * the actual band edge, not the center of freq for 940 * the actual band edge, not the center of freq for
@@ -649,12 +955,15 @@ static struct ieee80211_regdomain *country_ie_2_rd(
649 */ 955 */
650 freq_range->max_bandwidth_khz = MHZ_TO_KHZ(40); 956 freq_range->max_bandwidth_khz = MHZ_TO_KHZ(40);
651 power_rule->max_antenna_gain = DBI_TO_MBI(100); 957 power_rule->max_antenna_gain = DBI_TO_MBI(100);
652 power_rule->max_eirp = DBM_TO_MBM(100); 958 power_rule->max_eirp = DBM_TO_MBM(triplet->chans.max_power);
653 959
654 country_ie += 3;
655 country_ie_len -= 3;
656 i++; 960 i++;
657 961
962 if (country_ie_len >= 3) {
963 country_ie += 3;
964 country_ie_len -= 3;
965 }
966
658 BUG_ON(i > NL80211_MAX_SUPP_REG_RULES); 967 BUG_ON(i > NL80211_MAX_SUPP_REG_RULES);
659 } 968 }
660 969
@@ -950,25 +1259,21 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
950 if (r == -ERANGE && 1259 if (r == -ERANGE &&
951 last_request->initiator == 1260 last_request->initiator ==
952 NL80211_REGDOM_SET_BY_COUNTRY_IE) { 1261 NL80211_REGDOM_SET_BY_COUNTRY_IE) {
953#ifdef CONFIG_CFG80211_REG_DEBUG 1262 REG_DBG_PRINT("cfg80211: Leaving channel %d MHz "
954 printk(KERN_DEBUG "cfg80211: Leaving channel %d MHz "
955 "intact on %s - no rule found in band on " 1263 "intact on %s - no rule found in band on "
956 "Country IE\n", 1264 "Country IE\n",
957 chan->center_freq, wiphy_name(wiphy)); 1265 chan->center_freq, wiphy_name(wiphy));
958#endif
959 } else { 1266 } else {
960 /* 1267 /*
961 * In this case we know the country IE has at least one reg rule 1268 * In this case we know the country IE has at least one reg rule
962 * for the band so we respect its band definitions 1269 * for the band so we respect its band definitions
963 */ 1270 */
964#ifdef CONFIG_CFG80211_REG_DEBUG
965 if (last_request->initiator == 1271 if (last_request->initiator ==
966 NL80211_REGDOM_SET_BY_COUNTRY_IE) 1272 NL80211_REGDOM_SET_BY_COUNTRY_IE)
967 printk(KERN_DEBUG "cfg80211: Disabling " 1273 REG_DBG_PRINT("cfg80211: Disabling "
968 "channel %d MHz on %s due to " 1274 "channel %d MHz on %s due to "
969 "Country IE\n", 1275 "Country IE\n",
970 chan->center_freq, wiphy_name(wiphy)); 1276 chan->center_freq, wiphy_name(wiphy));
971#endif
972 flags |= IEEE80211_CHAN_DISABLED; 1277 flags |= IEEE80211_CHAN_DISABLED;
973 chan->flags = flags; 1278 chan->flags = flags;
974 } 1279 }
@@ -1342,30 +1647,6 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
1342} 1647}
1343EXPORT_SYMBOL(wiphy_apply_custom_regulatory); 1648EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
1344 1649
1345static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd,
1346 const struct ieee80211_regdomain *src_regd)
1347{
1348 struct ieee80211_regdomain *regd;
1349 int size_of_regd = 0;
1350 unsigned int i;
1351
1352 size_of_regd = sizeof(struct ieee80211_regdomain) +
1353 ((src_regd->n_reg_rules + 1) * sizeof(struct ieee80211_reg_rule));
1354
1355 regd = kzalloc(size_of_regd, GFP_KERNEL);
1356 if (!regd)
1357 return -ENOMEM;
1358
1359 memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
1360
1361 for (i = 0; i < src_regd->n_reg_rules; i++)
1362 memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
1363 sizeof(struct ieee80211_reg_rule));
1364
1365 *dst_regd = regd;
1366 return 0;
1367}
1368
1369/* 1650/*
1370 * Return value which can be used by ignore_request() to indicate 1651 * Return value which can be used by ignore_request() to indicate
1371 * it has been determined we should intersect two regulatory domains 1652 * it has been determined we should intersect two regulatory domains
@@ -1387,7 +1668,7 @@ static int ignore_request(struct wiphy *wiphy,
1387 1668
1388 switch (pending_request->initiator) { 1669 switch (pending_request->initiator) {
1389 case NL80211_REGDOM_SET_BY_CORE: 1670 case NL80211_REGDOM_SET_BY_CORE:
1390 return -EINVAL; 1671 return 0;
1391 case NL80211_REGDOM_SET_BY_COUNTRY_IE: 1672 case NL80211_REGDOM_SET_BY_COUNTRY_IE:
1392 1673
1393 last_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); 1674 last_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
@@ -1418,8 +1699,6 @@ static int ignore_request(struct wiphy *wiphy,
1418 return REG_INTERSECT; 1699 return REG_INTERSECT;
1419 case NL80211_REGDOM_SET_BY_DRIVER: 1700 case NL80211_REGDOM_SET_BY_DRIVER:
1420 if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) { 1701 if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) {
1421 if (is_old_static_regdom(cfg80211_regdomain))
1422 return 0;
1423 if (regdom_changes(pending_request->alpha2)) 1702 if (regdom_changes(pending_request->alpha2))
1424 return 0; 1703 return 0;
1425 return -EALREADY; 1704 return -EALREADY;
@@ -1456,8 +1735,7 @@ static int ignore_request(struct wiphy *wiphy,
1456 return -EAGAIN; 1735 return -EAGAIN;
1457 } 1736 }
1458 1737
1459 if (!is_old_static_regdom(cfg80211_regdomain) && 1738 if (!regdom_changes(pending_request->alpha2))
1460 !regdom_changes(pending_request->alpha2))
1461 return -EALREADY; 1739 return -EALREADY;
1462 1740
1463 return 0; 1741 return 0;
@@ -1529,6 +1807,11 @@ new_request:
1529 1807
1530 pending_request = NULL; 1808 pending_request = NULL;
1531 1809
1810 if (last_request->initiator == NL80211_REGDOM_SET_BY_USER) {
1811 user_alpha2[0] = last_request->alpha2[0];
1812 user_alpha2[1] = last_request->alpha2[1];
1813 }
1814
1532 /* When r == REG_INTERSECT we do need to call CRDA */ 1815 /* When r == REG_INTERSECT we do need to call CRDA */
1533 if (r < 0) { 1816 if (r < 0) {
1534 /* 1817 /*
@@ -1648,12 +1931,16 @@ static void queue_regulatory_request(struct regulatory_request *request)
1648 schedule_work(&reg_work); 1931 schedule_work(&reg_work);
1649} 1932}
1650 1933
1651/* Core regulatory hint -- happens once during cfg80211_init() */ 1934/*
1935 * Core regulatory hint -- happens during cfg80211_init()
1936 * and when we restore regulatory settings.
1937 */
1652static int regulatory_hint_core(const char *alpha2) 1938static int regulatory_hint_core(const char *alpha2)
1653{ 1939{
1654 struct regulatory_request *request; 1940 struct regulatory_request *request;
1655 1941
1656 BUG_ON(last_request); 1942 kfree(last_request);
1943 last_request = NULL;
1657 1944
1658 request = kzalloc(sizeof(struct regulatory_request), 1945 request = kzalloc(sizeof(struct regulatory_request),
1659 GFP_KERNEL); 1946 GFP_KERNEL);
@@ -1664,14 +1951,12 @@ static int regulatory_hint_core(const char *alpha2)
1664 request->alpha2[1] = alpha2[1]; 1951 request->alpha2[1] = alpha2[1];
1665 request->initiator = NL80211_REGDOM_SET_BY_CORE; 1952 request->initiator = NL80211_REGDOM_SET_BY_CORE;
1666 1953
1667 queue_regulatory_request(request);
1668
1669 /* 1954 /*
1670 * This ensures last_request is populated once modules 1955 * This ensures last_request is populated once modules
1671 * come swinging in and calling regulatory hints and 1956 * come swinging in and calling regulatory hints and
1672 * wiphy_apply_custom_regulatory(). 1957 * wiphy_apply_custom_regulatory().
1673 */ 1958 */
1674 flush_scheduled_work(); 1959 reg_process_hint(request);
1675 1960
1676 return 0; 1961 return 0;
1677} 1962}
@@ -1690,7 +1975,7 @@ int regulatory_hint_user(const char *alpha2)
1690 request->wiphy_idx = WIPHY_IDX_STALE; 1975 request->wiphy_idx = WIPHY_IDX_STALE;
1691 request->alpha2[0] = alpha2[0]; 1976 request->alpha2[0] = alpha2[0];
1692 request->alpha2[1] = alpha2[1]; 1977 request->alpha2[1] = alpha2[1];
1693 request->initiator = NL80211_REGDOM_SET_BY_USER, 1978 request->initiator = NL80211_REGDOM_SET_BY_USER;
1694 1979
1695 queue_regulatory_request(request); 1980 queue_regulatory_request(request);
1696 1981
@@ -1758,8 +2043,9 @@ static bool reg_same_country_ie_hint(struct wiphy *wiphy,
1758 * therefore cannot iterate over the rdev list here. 2043 * therefore cannot iterate over the rdev list here.
1759 */ 2044 */
1760void regulatory_hint_11d(struct wiphy *wiphy, 2045void regulatory_hint_11d(struct wiphy *wiphy,
1761 u8 *country_ie, 2046 enum ieee80211_band band,
1762 u8 country_ie_len) 2047 u8 *country_ie,
2048 u8 country_ie_len)
1763{ 2049{
1764 struct ieee80211_regdomain *rd = NULL; 2050 struct ieee80211_regdomain *rd = NULL;
1765 char alpha2[2]; 2051 char alpha2[2];
@@ -1805,9 +2091,11 @@ void regulatory_hint_11d(struct wiphy *wiphy,
1805 wiphy_idx_valid(last_request->wiphy_idx))) 2091 wiphy_idx_valid(last_request->wiphy_idx)))
1806 goto out; 2092 goto out;
1807 2093
1808 rd = country_ie_2_rd(country_ie, country_ie_len, &checksum); 2094 rd = country_ie_2_rd(band, country_ie, country_ie_len, &checksum);
1809 if (!rd) 2095 if (!rd) {
2096 REG_DBG_PRINT("cfg80211: Ignoring bogus country IE\n");
1810 goto out; 2097 goto out;
2098 }
1811 2099
1812 /* 2100 /*
1813 * This will not happen right now but we leave it here for the 2101 * This will not happen right now but we leave it here for the
@@ -1850,6 +2138,123 @@ out:
1850 mutex_unlock(&reg_mutex); 2138 mutex_unlock(&reg_mutex);
1851} 2139}
1852 2140
2141static void restore_alpha2(char *alpha2, bool reset_user)
2142{
2143 /* indicates there is no alpha2 to consider for restoration */
2144 alpha2[0] = '9';
2145 alpha2[1] = '7';
2146
2147 /* The user setting has precedence over the module parameter */
2148 if (is_user_regdom_saved()) {
2149 /* Unless we're asked to ignore it and reset it */
2150 if (reset_user) {
2151 REG_DBG_PRINT("cfg80211: Restoring regulatory settings "
2152 "including user preference\n");
2153 user_alpha2[0] = '9';
2154 user_alpha2[1] = '7';
2155
2156 /*
2157 * If we're ignoring user settings, we still need to
2158 * check the module parameter to ensure we put things
2159 * back as they were for a full restore.
2160 */
2161 if (!is_world_regdom(ieee80211_regdom)) {
2162 REG_DBG_PRINT("cfg80211: Keeping preference on "
2163 "module parameter ieee80211_regdom: %c%c\n",
2164 ieee80211_regdom[0],
2165 ieee80211_regdom[1]);
2166 alpha2[0] = ieee80211_regdom[0];
2167 alpha2[1] = ieee80211_regdom[1];
2168 }
2169 } else {
2170 REG_DBG_PRINT("cfg80211: Restoring regulatory settings "
2171 "while preserving user preference for: %c%c\n",
2172 user_alpha2[0],
2173 user_alpha2[1]);
2174 alpha2[0] = user_alpha2[0];
2175 alpha2[1] = user_alpha2[1];
2176 }
2177 } else if (!is_world_regdom(ieee80211_regdom)) {
2178 REG_DBG_PRINT("cfg80211: Keeping preference on "
2179 "module parameter ieee80211_regdom: %c%c\n",
2180 ieee80211_regdom[0],
2181 ieee80211_regdom[1]);
2182 alpha2[0] = ieee80211_regdom[0];
2183 alpha2[1] = ieee80211_regdom[1];
2184 } else
2185 REG_DBG_PRINT("cfg80211: Restoring regulatory settings\n");
2186}
2187
2188/*
2189 * Restoring regulatory settings involves ingoring any
2190 * possibly stale country IE information and user regulatory
2191 * settings if so desired, this includes any beacon hints
2192 * learned as we could have traveled outside to another country
2193 * after disconnection. To restore regulatory settings we do
2194 * exactly what we did at bootup:
2195 *
2196 * - send a core regulatory hint
2197 * - send a user regulatory hint if applicable
2198 *
2199 * Device drivers that send a regulatory hint for a specific country
2200 * keep their own regulatory domain on wiphy->regd so that does does
2201 * not need to be remembered.
2202 */
2203static void restore_regulatory_settings(bool reset_user)
2204{
2205 char alpha2[2];
2206 struct reg_beacon *reg_beacon, *btmp;
2207
2208 mutex_lock(&cfg80211_mutex);
2209 mutex_lock(&reg_mutex);
2210
2211 reset_regdomains();
2212 restore_alpha2(alpha2, reset_user);
2213
2214 /* Clear beacon hints */
2215 spin_lock_bh(&reg_pending_beacons_lock);
2216 if (!list_empty(&reg_pending_beacons)) {
2217 list_for_each_entry_safe(reg_beacon, btmp,
2218 &reg_pending_beacons, list) {
2219 list_del(&reg_beacon->list);
2220 kfree(reg_beacon);
2221 }
2222 }
2223 spin_unlock_bh(&reg_pending_beacons_lock);
2224
2225 if (!list_empty(&reg_beacon_list)) {
2226 list_for_each_entry_safe(reg_beacon, btmp,
2227 &reg_beacon_list, list) {
2228 list_del(&reg_beacon->list);
2229 kfree(reg_beacon);
2230 }
2231 }
2232
2233 /* First restore to the basic regulatory settings */
2234 cfg80211_regdomain = cfg80211_world_regdom;
2235
2236 mutex_unlock(&reg_mutex);
2237 mutex_unlock(&cfg80211_mutex);
2238
2239 regulatory_hint_core(cfg80211_regdomain->alpha2);
2240
2241 /*
2242 * This restores the ieee80211_regdom module parameter
2243 * preference or the last user requested regulatory
2244 * settings, user regulatory settings takes precedence.
2245 */
2246 if (is_an_alpha2(alpha2))
2247 regulatory_hint_user(user_alpha2);
2248}
2249
2250
2251void regulatory_hint_disconnect(void)
2252{
2253 REG_DBG_PRINT("cfg80211: All devices are disconnected, going to "
2254 "restore regulatory settings\n");
2255 restore_regulatory_settings(false);
2256}
2257
1853static bool freq_is_chan_12_13_14(u16 freq) 2258static bool freq_is_chan_12_13_14(u16 freq)
1854{ 2259{
1855 if (freq == ieee80211_channel_to_frequency(12) || 2260 if (freq == ieee80211_channel_to_frequency(12) ||
@@ -1875,13 +2280,12 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
1875 if (!reg_beacon) 2280 if (!reg_beacon)
1876 return -ENOMEM; 2281 return -ENOMEM;
1877 2282
1878#ifdef CONFIG_CFG80211_REG_DEBUG 2283 REG_DBG_PRINT("cfg80211: Found new beacon on "
1879 printk(KERN_DEBUG "cfg80211: Found new beacon on " 2284 "frequency: %d MHz (Ch %d) on %s\n",
1880 "frequency: %d MHz (Ch %d) on %s\n", 2285 beacon_chan->center_freq,
1881 beacon_chan->center_freq, 2286 ieee80211_frequency_to_channel(beacon_chan->center_freq),
1882 ieee80211_frequency_to_channel(beacon_chan->center_freq), 2287 wiphy_name(wiphy));
1883 wiphy_name(wiphy)); 2288
1884#endif
1885 memcpy(&reg_beacon->chan, beacon_chan, 2289 memcpy(&reg_beacon->chan, beacon_chan,
1886 sizeof(struct ieee80211_channel)); 2290 sizeof(struct ieee80211_channel));
1887 2291
@@ -2039,8 +2443,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2039 * If someone else asked us to change the rd lets only bother 2443 * If someone else asked us to change the rd lets only bother
2040 * checking if the alpha2 changes if CRDA was already called 2444 * checking if the alpha2 changes if CRDA was already called
2041 */ 2445 */
2042 if (!is_old_static_regdom(cfg80211_regdomain) && 2446 if (!regdom_changes(rd->alpha2))
2043 !regdom_changes(rd->alpha2))
2044 return -EINVAL; 2447 return -EINVAL;
2045 } 2448 }
2046 2449
@@ -2239,15 +2642,11 @@ int regulatory_init(void)
2239 spin_lock_init(&reg_requests_lock); 2642 spin_lock_init(&reg_requests_lock);
2240 spin_lock_init(&reg_pending_beacons_lock); 2643 spin_lock_init(&reg_pending_beacons_lock);
2241 2644
2242#ifdef CONFIG_WIRELESS_OLD_REGULATORY
2243 cfg80211_regdomain = static_regdom(ieee80211_regdom);
2244
2245 printk(KERN_INFO "cfg80211: Using static regulatory domain info\n");
2246 print_regdomain_info(cfg80211_regdomain);
2247#else
2248 cfg80211_regdomain = cfg80211_world_regdom; 2645 cfg80211_regdomain = cfg80211_world_regdom;
2249 2646
2250#endif 2647 user_alpha2[0] = '9';
2648 user_alpha2[1] = '7';
2649
2251 /* We always try to get an update for the static regdomain */ 2650 /* We always try to get an update for the static regdomain */
2252 err = regulatory_hint_core(cfg80211_regdomain->alpha2); 2651 err = regulatory_hint_core(cfg80211_regdomain->alpha2);
2253 if (err) { 2652 if (err) {
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index 3362c7c069b2..b26224a9f3bc 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -41,15 +41,44 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
41 * regulatory_hint_11d - hints a country IE as a regulatory domain 41 * regulatory_hint_11d - hints a country IE as a regulatory domain
42 * @wiphy: the wireless device giving the hint (used only for reporting 42 * @wiphy: the wireless device giving the hint (used only for reporting
43 * conflicts) 43 * conflicts)
44 * @band: the band on which the country IE was received on. This determines
45 * the band we'll process the country IE channel triplets for.
44 * @country_ie: pointer to the country IE 46 * @country_ie: pointer to the country IE
45 * @country_ie_len: length of the country IE 47 * @country_ie_len: length of the country IE
46 * 48 *
47 * We will intersect the rd with the what CRDA tells us should apply 49 * We will intersect the rd with the what CRDA tells us should apply
48 * for the alpha2 this country IE belongs to, this prevents APs from 50 * for the alpha2 this country IE belongs to, this prevents APs from
49 * sending us incorrect or outdated information against a country. 51 * sending us incorrect or outdated information against a country.
52 *
53 * The AP is expected to provide Country IE channel triplets for the
54 * band it is on. It is technically possible for APs to send channel
55 * country IE triplets even for channels outside of the band they are
56 * in but for that they would have to use the regulatory extension
57 * in combination with a triplet but this behaviour is currently
58 * not observed. For this reason if a triplet is seen with channel
59 * information for a band the BSS is not present in it will be ignored.
50 */ 60 */
51void regulatory_hint_11d(struct wiphy *wiphy, 61void regulatory_hint_11d(struct wiphy *wiphy,
62 enum ieee80211_band band,
52 u8 *country_ie, 63 u8 *country_ie,
53 u8 country_ie_len); 64 u8 country_ie_len);
54 65
66/**
67 * regulatory_hint_disconnect - informs all devices have been disconneted
68 *
69 * Regulotory rules can be enhanced further upon scanning and upon
70 * connection to an AP. These rules become stale if we disconnect
71 * and go to another country, whether or not we suspend and resume.
72 * If we suspend, go to another country and resume we'll automatically
73 * get disconnected shortly after resuming and things will be reset as well.
74 * This routine is a helper to restore regulatory settings to how they were
75 * prior to our first connect attempt. This includes ignoring country IE and
76 * beacon regulatory hints. The ieee80211_regdom module parameter will always
77 * be respected but if a user had set the regulatory domain that will take
78 * precedence.
79 *
80 * Must be called from process context.
81 */
82void regulatory_hint_disconnect(void);
83
55#endif /* __NET_WIRELESS_REG_H */ 84#endif /* __NET_WIRELESS_REG_H */
diff --git a/net/wireless/regdb.h b/net/wireless/regdb.h
new file mode 100644
index 000000000000..818222c92513
--- /dev/null
+++ b/net/wireless/regdb.h
@@ -0,0 +1,7 @@
1#ifndef __REGDB_H__
2#define __REGDB_H__
3
4extern const struct ieee80211_regdomain *reg_regdb[];
5extern int reg_regdb_size;
6
7#endif /* __REGDB_H__ */
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 12dfa62aad18..978cac3414b5 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -100,8 +100,10 @@ static void bss_release(struct kref *ref)
100 if (bss->pub.free_priv) 100 if (bss->pub.free_priv)
101 bss->pub.free_priv(&bss->pub); 101 bss->pub.free_priv(&bss->pub);
102 102
103 if (bss->ies_allocated) 103 if (bss->beacon_ies_allocated)
104 kfree(bss->pub.information_elements); 104 kfree(bss->pub.beacon_ies);
105 if (bss->proberesp_ies_allocated)
106 kfree(bss->pub.proberesp_ies);
105 107
106 BUG_ON(atomic_read(&bss->hold)); 108 BUG_ON(atomic_read(&bss->hold));
107 109
@@ -141,9 +143,9 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *dev)
141 dev->bss_generation++; 143 dev->bss_generation++;
142} 144}
143 145
144static u8 *find_ie(u8 num, u8 *ies, int len) 146const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
145{ 147{
146 while (len > 2 && ies[0] != num) { 148 while (len > 2 && ies[0] != eid) {
147 len -= ies[1] + 2; 149 len -= ies[1] + 2;
148 ies += ies[1] + 2; 150 ies += ies[1] + 2;
149 } 151 }
@@ -153,11 +155,12 @@ static u8 *find_ie(u8 num, u8 *ies, int len)
153 return NULL; 155 return NULL;
154 return ies; 156 return ies;
155} 157}
158EXPORT_SYMBOL(cfg80211_find_ie);
156 159
157static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2) 160static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2)
158{ 161{
159 const u8 *ie1 = find_ie(num, ies1, len1); 162 const u8 *ie1 = cfg80211_find_ie(num, ies1, len1);
160 const u8 *ie2 = find_ie(num, ies2, len2); 163 const u8 *ie2 = cfg80211_find_ie(num, ies2, len2);
161 int r; 164 int r;
162 165
163 if (!ie1 && !ie2) 166 if (!ie1 && !ie2)
@@ -183,9 +186,9 @@ static bool is_bss(struct cfg80211_bss *a,
183 if (!ssid) 186 if (!ssid)
184 return true; 187 return true;
185 188
186 ssidie = find_ie(WLAN_EID_SSID, 189 ssidie = cfg80211_find_ie(WLAN_EID_SSID,
187 a->information_elements, 190 a->information_elements,
188 a->len_information_elements); 191 a->len_information_elements);
189 if (!ssidie) 192 if (!ssidie)
190 return false; 193 return false;
191 if (ssidie[1] != ssid_len) 194 if (ssidie[1] != ssid_len)
@@ -202,9 +205,9 @@ static bool is_mesh(struct cfg80211_bss *a,
202 if (!is_zero_ether_addr(a->bssid)) 205 if (!is_zero_ether_addr(a->bssid))
203 return false; 206 return false;
204 207
205 ie = find_ie(WLAN_EID_MESH_ID, 208 ie = cfg80211_find_ie(WLAN_EID_MESH_ID,
206 a->information_elements, 209 a->information_elements,
207 a->len_information_elements); 210 a->len_information_elements);
208 if (!ie) 211 if (!ie)
209 return false; 212 return false;
210 if (ie[1] != meshidlen) 213 if (ie[1] != meshidlen)
@@ -212,9 +215,9 @@ static bool is_mesh(struct cfg80211_bss *a,
212 if (memcmp(ie + 2, meshid, meshidlen)) 215 if (memcmp(ie + 2, meshid, meshidlen))
213 return false; 216 return false;
214 217
215 ie = find_ie(WLAN_EID_MESH_CONFIG, 218 ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
216 a->information_elements, 219 a->information_elements,
217 a->len_information_elements); 220 a->len_information_elements);
218 if (!ie) 221 if (!ie)
219 return false; 222 return false;
220 if (ie[1] != sizeof(struct ieee80211_meshconf_ie)) 223 if (ie[1] != sizeof(struct ieee80211_meshconf_ie))
@@ -375,8 +378,7 @@ rb_find_bss(struct cfg80211_registered_device *dev,
375 378
376static struct cfg80211_internal_bss * 379static struct cfg80211_internal_bss *
377cfg80211_bss_update(struct cfg80211_registered_device *dev, 380cfg80211_bss_update(struct cfg80211_registered_device *dev,
378 struct cfg80211_internal_bss *res, 381 struct cfg80211_internal_bss *res)
379 bool overwrite)
380{ 382{
381 struct cfg80211_internal_bss *found = NULL; 383 struct cfg80211_internal_bss *found = NULL;
382 const u8 *meshid, *meshcfg; 384 const u8 *meshid, *meshcfg;
@@ -394,11 +396,12 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
394 396
395 if (is_zero_ether_addr(res->pub.bssid)) { 397 if (is_zero_ether_addr(res->pub.bssid)) {
396 /* must be mesh, verify */ 398 /* must be mesh, verify */
397 meshid = find_ie(WLAN_EID_MESH_ID, res->pub.information_elements, 399 meshid = cfg80211_find_ie(WLAN_EID_MESH_ID,
398 res->pub.len_information_elements); 400 res->pub.information_elements,
399 meshcfg = find_ie(WLAN_EID_MESH_CONFIG, 401 res->pub.len_information_elements);
400 res->pub.information_elements, 402 meshcfg = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
401 res->pub.len_information_elements); 403 res->pub.information_elements,
404 res->pub.len_information_elements);
402 if (!meshid || !meshcfg || 405 if (!meshid || !meshcfg ||
403 meshcfg[1] != sizeof(struct ieee80211_meshconf_ie)) { 406 meshcfg[1] != sizeof(struct ieee80211_meshconf_ie)) {
404 /* bogus mesh */ 407 /* bogus mesh */
@@ -418,28 +421,64 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
418 found->pub.capability = res->pub.capability; 421 found->pub.capability = res->pub.capability;
419 found->ts = res->ts; 422 found->ts = res->ts;
420 423
421 /* overwrite IEs */ 424 /* Update IEs */
422 if (overwrite) { 425 if (res->pub.proberesp_ies) {
423 size_t used = dev->wiphy.bss_priv_size + sizeof(*res); 426 size_t used = dev->wiphy.bss_priv_size + sizeof(*res);
424 size_t ielen = res->pub.len_information_elements; 427 size_t ielen = res->pub.len_proberesp_ies;
428
429 if (found->pub.proberesp_ies &&
430 !found->proberesp_ies_allocated &&
431 ksize(found) >= used + ielen) {
432 memcpy(found->pub.proberesp_ies,
433 res->pub.proberesp_ies, ielen);
434 found->pub.len_proberesp_ies = ielen;
435 } else {
436 u8 *ies = found->pub.proberesp_ies;
437
438 if (found->proberesp_ies_allocated)
439 ies = krealloc(ies, ielen, GFP_ATOMIC);
440 else
441 ies = kmalloc(ielen, GFP_ATOMIC);
425 442
426 if (!found->ies_allocated && ksize(found) >= used + ielen) { 443 if (ies) {
427 memcpy(found->pub.information_elements, 444 memcpy(ies, res->pub.proberesp_ies,
428 res->pub.information_elements, ielen); 445 ielen);
429 found->pub.len_information_elements = ielen; 446 found->proberesp_ies_allocated = true;
447 found->pub.proberesp_ies = ies;
448 found->pub.len_proberesp_ies = ielen;
449 }
450 }
451
452 /* Override possible earlier Beacon frame IEs */
453 found->pub.information_elements =
454 found->pub.proberesp_ies;
455 found->pub.len_information_elements =
456 found->pub.len_proberesp_ies;
457 }
458 if (res->pub.beacon_ies) {
459 size_t used = dev->wiphy.bss_priv_size + sizeof(*res);
460 size_t ielen = res->pub.len_beacon_ies;
461
462 if (found->pub.beacon_ies &&
463 !found->beacon_ies_allocated &&
464 ksize(found) >= used + ielen) {
465 memcpy(found->pub.beacon_ies,
466 res->pub.beacon_ies, ielen);
467 found->pub.len_beacon_ies = ielen;
430 } else { 468 } else {
431 u8 *ies = found->pub.information_elements; 469 u8 *ies = found->pub.beacon_ies;
432 470
433 if (found->ies_allocated) 471 if (found->beacon_ies_allocated)
434 ies = krealloc(ies, ielen, GFP_ATOMIC); 472 ies = krealloc(ies, ielen, GFP_ATOMIC);
435 else 473 else
436 ies = kmalloc(ielen, GFP_ATOMIC); 474 ies = kmalloc(ielen, GFP_ATOMIC);
437 475
438 if (ies) { 476 if (ies) {
439 memcpy(ies, res->pub.information_elements, ielen); 477 memcpy(ies, res->pub.beacon_ies,
440 found->ies_allocated = true; 478 ielen);
441 found->pub.information_elements = ies; 479 found->beacon_ies_allocated = true;
442 found->pub.len_information_elements = ielen; 480 found->pub.beacon_ies = ies;
481 found->pub.len_beacon_ies = ielen;
443 } 482 }
444 } 483 }
445 } 484 }
@@ -489,14 +528,26 @@ cfg80211_inform_bss(struct wiphy *wiphy,
489 res->pub.tsf = timestamp; 528 res->pub.tsf = timestamp;
490 res->pub.beacon_interval = beacon_interval; 529 res->pub.beacon_interval = beacon_interval;
491 res->pub.capability = capability; 530 res->pub.capability = capability;
492 /* point to after the private area */ 531 /*
493 res->pub.information_elements = (u8 *)res + sizeof(*res) + privsz; 532 * Since we do not know here whether the IEs are from a Beacon or Probe
494 memcpy(res->pub.information_elements, ie, ielen); 533 * Response frame, we need to pick one of the options and only use it
495 res->pub.len_information_elements = ielen; 534 * with the driver that does not provide the full Beacon/Probe Response
535 * frame. Use Beacon frame pointer to avoid indicating that this should
536 * override the information_elements pointer should we have received an
537 * earlier indication of Probe Response data.
538 *
539 * The initial buffer for the IEs is allocated with the BSS entry and
540 * is located after the private area.
541 */
542 res->pub.beacon_ies = (u8 *)res + sizeof(*res) + privsz;
543 memcpy(res->pub.beacon_ies, ie, ielen);
544 res->pub.len_beacon_ies = ielen;
545 res->pub.information_elements = res->pub.beacon_ies;
546 res->pub.len_information_elements = res->pub.len_beacon_ies;
496 547
497 kref_init(&res->ref); 548 kref_init(&res->ref);
498 549
499 res = cfg80211_bss_update(wiphy_to_dev(wiphy), res, 0); 550 res = cfg80211_bss_update(wiphy_to_dev(wiphy), res);
500 if (!res) 551 if (!res)
501 return NULL; 552 return NULL;
502 553
@@ -517,7 +568,6 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
517 struct cfg80211_internal_bss *res; 568 struct cfg80211_internal_bss *res;
518 size_t ielen = len - offsetof(struct ieee80211_mgmt, 569 size_t ielen = len - offsetof(struct ieee80211_mgmt,
519 u.probe_resp.variable); 570 u.probe_resp.variable);
520 bool overwrite;
521 size_t privsz = wiphy->bss_priv_size; 571 size_t privsz = wiphy->bss_priv_size;
522 572
523 if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC && 573 if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC &&
@@ -538,16 +588,28 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
538 res->pub.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp); 588 res->pub.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
539 res->pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int); 589 res->pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
540 res->pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info); 590 res->pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
541 /* point to after the private area */ 591 /*
542 res->pub.information_elements = (u8 *)res + sizeof(*res) + privsz; 592 * The initial buffer for the IEs is allocated with the BSS entry and
543 memcpy(res->pub.information_elements, mgmt->u.probe_resp.variable, ielen); 593 * is located after the private area.
544 res->pub.len_information_elements = ielen; 594 */
595 if (ieee80211_is_probe_resp(mgmt->frame_control)) {
596 res->pub.proberesp_ies = (u8 *) res + sizeof(*res) + privsz;
597 memcpy(res->pub.proberesp_ies, mgmt->u.probe_resp.variable,
598 ielen);
599 res->pub.len_proberesp_ies = ielen;
600 res->pub.information_elements = res->pub.proberesp_ies;
601 res->pub.len_information_elements = res->pub.len_proberesp_ies;
602 } else {
603 res->pub.beacon_ies = (u8 *) res + sizeof(*res) + privsz;
604 memcpy(res->pub.beacon_ies, mgmt->u.beacon.variable, ielen);
605 res->pub.len_beacon_ies = ielen;
606 res->pub.information_elements = res->pub.beacon_ies;
607 res->pub.len_information_elements = res->pub.len_beacon_ies;
608 }
545 609
546 kref_init(&res->ref); 610 kref_init(&res->ref);
547 611
548 overwrite = ieee80211_is_probe_resp(mgmt->frame_control); 612 res = cfg80211_bss_update(wiphy_to_dev(wiphy), res);
549
550 res = cfg80211_bss_update(wiphy_to_dev(wiphy), res, overwrite);
551 if (!res) 613 if (!res)
552 return NULL; 614 return NULL;
553 615
@@ -601,7 +663,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
601 struct cfg80211_registered_device *rdev; 663 struct cfg80211_registered_device *rdev;
602 struct wiphy *wiphy; 664 struct wiphy *wiphy;
603 struct iw_scan_req *wreq = NULL; 665 struct iw_scan_req *wreq = NULL;
604 struct cfg80211_scan_request *creq; 666 struct cfg80211_scan_request *creq = NULL;
605 int i, err, n_channels = 0; 667 int i, err, n_channels = 0;
606 enum ieee80211_band band; 668 enum ieee80211_band band;
607 669
@@ -694,8 +756,10 @@ int cfg80211_wext_siwscan(struct net_device *dev,
694 /* translate "Scan for SSID" request */ 756 /* translate "Scan for SSID" request */
695 if (wreq) { 757 if (wreq) {
696 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { 758 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
697 if (wreq->essid_len > IEEE80211_MAX_SSID_LEN) 759 if (wreq->essid_len > IEEE80211_MAX_SSID_LEN) {
698 return -EINVAL; 760 err = -EINVAL;
761 goto out;
762 }
699 memcpy(creq->ssids[0].ssid, wreq->essid, wreq->essid_len); 763 memcpy(creq->ssids[0].ssid, wreq->essid, wreq->essid_len);
700 creq->ssids[0].ssid_len = wreq->essid_len; 764 creq->ssids[0].ssid_len = wreq->essid_len;
701 } 765 }
@@ -707,12 +771,15 @@ int cfg80211_wext_siwscan(struct net_device *dev,
707 err = rdev->ops->scan(wiphy, dev, creq); 771 err = rdev->ops->scan(wiphy, dev, creq);
708 if (err) { 772 if (err) {
709 rdev->scan_req = NULL; 773 rdev->scan_req = NULL;
710 kfree(creq); 774 /* creq will be freed below */
711 } else { 775 } else {
712 nl80211_send_scan_start(rdev, dev); 776 nl80211_send_scan_start(rdev, dev);
777 /* creq now owned by driver */
778 creq = NULL;
713 dev_hold(dev); 779 dev_hold(dev);
714 } 780 }
715 out: 781 out:
782 kfree(creq);
716 cfg80211_unlock_rdev(rdev); 783 cfg80211_unlock_rdev(rdev);
717 return err; 784 return err;
718} 785}
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 2333d78187e4..17fde0da1b08 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -34,6 +34,44 @@ struct cfg80211_conn {
34 bool auto_auth, prev_bssid_valid; 34 bool auto_auth, prev_bssid_valid;
35}; 35};
36 36
37bool cfg80211_is_all_idle(void)
38{
39 struct cfg80211_registered_device *rdev;
40 struct wireless_dev *wdev;
41 bool is_all_idle = true;
42
43 mutex_lock(&cfg80211_mutex);
44
45 /*
46 * All devices must be idle as otherwise if you are actively
47 * scanning some new beacon hints could be learned and would
48 * count as new regulatory hints.
49 */
50 list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
51 cfg80211_lock_rdev(rdev);
52 list_for_each_entry(wdev, &rdev->netdev_list, list) {
53 wdev_lock(wdev);
54 if (wdev->sme_state != CFG80211_SME_IDLE)
55 is_all_idle = false;
56 wdev_unlock(wdev);
57 }
58 cfg80211_unlock_rdev(rdev);
59 }
60
61 mutex_unlock(&cfg80211_mutex);
62
63 return is_all_idle;
64}
65
66static void disconnect_work(struct work_struct *work)
67{
68 if (!cfg80211_is_all_idle())
69 return;
70
71 regulatory_hint_disconnect();
72}
73
74static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
37 75
38static int cfg80211_conn_scan(struct wireless_dev *wdev) 76static int cfg80211_conn_scan(struct wireless_dev *wdev)
39{ 77{
@@ -454,6 +492,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
454 * - and country_ie[1] which is the IE length 492 * - and country_ie[1] which is the IE length
455 */ 493 */
456 regulatory_hint_11d(wdev->wiphy, 494 regulatory_hint_11d(wdev->wiphy,
495 bss->channel->band,
457 country_ie + 2, 496 country_ie + 2,
458 country_ie[1]); 497 country_ie[1]);
459} 498}
@@ -655,7 +694,10 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
655 memset(&wrqu, 0, sizeof(wrqu)); 694 memset(&wrqu, 0, sizeof(wrqu));
656 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 695 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
657 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); 696 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
697 wdev->wext.connect.ssid_len = 0;
658#endif 698#endif
699
700 schedule_work(&cfg80211_disconnect_work);
659} 701}
660 702
661void cfg80211_disconnected(struct net_device *dev, u16 reason, 703void cfg80211_disconnected(struct net_device *dev, u16 reason,
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index efe3c5c92b2d..9f2cef3e0ca0 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -33,10 +33,30 @@ static ssize_t name ## _show(struct device *dev, \
33 33
34SHOW_FMT(index, "%d", wiphy_idx); 34SHOW_FMT(index, "%d", wiphy_idx);
35SHOW_FMT(macaddress, "%pM", wiphy.perm_addr); 35SHOW_FMT(macaddress, "%pM", wiphy.perm_addr);
36SHOW_FMT(address_mask, "%pM", wiphy.addr_mask);
37
38static ssize_t addresses_show(struct device *dev,
39 struct device_attribute *attr,
40 char *buf)
41{
42 struct wiphy *wiphy = &dev_to_rdev(dev)->wiphy;
43 char *start = buf;
44 int i;
45
46 if (!wiphy->addresses)
47 return sprintf(buf, "%pM\n", wiphy->perm_addr);
48
49 for (i = 0; i < wiphy->n_addresses; i++)
50 buf += sprintf(buf, "%pM\n", &wiphy->addresses[i].addr);
51
52 return buf - start;
53}
36 54
37static struct device_attribute ieee80211_dev_attrs[] = { 55static struct device_attribute ieee80211_dev_attrs[] = {
38 __ATTR_RO(index), 56 __ATTR_RO(index),
39 __ATTR_RO(macaddress), 57 __ATTR_RO(macaddress),
58 __ATTR_RO(address_mask),
59 __ATTR_RO(addresses),
40 {} 60 {}
41}; 61};
42 62
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 59361fdcb5d0..be2ab8c59e3a 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -227,8 +227,11 @@ unsigned int ieee80211_hdrlen(__le16 fc)
227 if (ieee80211_is_data(fc)) { 227 if (ieee80211_is_data(fc)) {
228 if (ieee80211_has_a4(fc)) 228 if (ieee80211_has_a4(fc))
229 hdrlen = 30; 229 hdrlen = 30;
230 if (ieee80211_is_data_qos(fc)) 230 if (ieee80211_is_data_qos(fc)) {
231 hdrlen += IEEE80211_QOS_CTL_LEN; 231 hdrlen += IEEE80211_QOS_CTL_LEN;
232 if (ieee80211_has_order(fc))
233 hdrlen += IEEE80211_HT_CTL_LEN;
234 }
232 goto out; 235 goto out;
233 } 236 }
234 237
@@ -285,7 +288,7 @@ static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
285 } 288 }
286} 289}
287 290
288int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr, 291int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
289 enum nl80211_iftype iftype) 292 enum nl80211_iftype iftype)
290{ 293{
291 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 294 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -383,7 +386,7 @@ int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr,
383} 386}
384EXPORT_SYMBOL(ieee80211_data_to_8023); 387EXPORT_SYMBOL(ieee80211_data_to_8023);
385 388
386int ieee80211_data_from_8023(struct sk_buff *skb, u8 *addr, 389int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
387 enum nl80211_iftype iftype, u8 *bssid, bool qos) 390 enum nl80211_iftype iftype, u8 *bssid, bool qos)
388{ 391{
389 struct ieee80211_hdr hdr; 392 struct ieee80211_hdr hdr;
@@ -497,6 +500,101 @@ int ieee80211_data_from_8023(struct sk_buff *skb, u8 *addr,
497} 500}
498EXPORT_SYMBOL(ieee80211_data_from_8023); 501EXPORT_SYMBOL(ieee80211_data_from_8023);
499 502
503
504void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
505 const u8 *addr, enum nl80211_iftype iftype,
506 const unsigned int extra_headroom)
507{
508 struct sk_buff *frame = NULL;
509 u16 ethertype;
510 u8 *payload;
511 const struct ethhdr *eth;
512 int remaining, err;
513 u8 dst[ETH_ALEN], src[ETH_ALEN];
514
515 err = ieee80211_data_to_8023(skb, addr, iftype);
516 if (err)
517 goto out;
518
519 /* skip the wrapping header */
520 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
521 if (!eth)
522 goto out;
523
524 while (skb != frame) {
525 u8 padding;
526 __be16 len = eth->h_proto;
527 unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
528
529 remaining = skb->len;
530 memcpy(dst, eth->h_dest, ETH_ALEN);
531 memcpy(src, eth->h_source, ETH_ALEN);
532
533 padding = (4 - subframe_len) & 0x3;
534 /* the last MSDU has no padding */
535 if (subframe_len > remaining)
536 goto purge;
537
538 skb_pull(skb, sizeof(struct ethhdr));
539 /* reuse skb for the last subframe */
540 if (remaining <= subframe_len + padding)
541 frame = skb;
542 else {
543 unsigned int hlen = ALIGN(extra_headroom, 4);
544 /*
545 * Allocate and reserve two bytes more for payload
546 * alignment since sizeof(struct ethhdr) is 14.
547 */
548 frame = dev_alloc_skb(hlen + subframe_len + 2);
549 if (!frame)
550 goto purge;
551
552 skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
553 memcpy(skb_put(frame, ntohs(len)), skb->data,
554 ntohs(len));
555
556 eth = (struct ethhdr *)skb_pull(skb, ntohs(len) +
557 padding);
558 if (!eth) {
559 dev_kfree_skb(frame);
560 goto purge;
561 }
562 }
563
564 skb_reset_network_header(frame);
565 frame->dev = skb->dev;
566 frame->priority = skb->priority;
567
568 payload = frame->data;
569 ethertype = (payload[6] << 8) | payload[7];
570
571 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
572 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
573 compare_ether_addr(payload,
574 bridge_tunnel_header) == 0)) {
575 /* remove RFC1042 or Bridge-Tunnel
576 * encapsulation and replace EtherType */
577 skb_pull(frame, 6);
578 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
579 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
580 } else {
581 memcpy(skb_push(frame, sizeof(__be16)), &len,
582 sizeof(__be16));
583 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
584 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
585 }
586 __skb_queue_tail(list, frame);
587 }
588
589 return;
590
591 purge:
592 __skb_queue_purge(list);
593 out:
594 dev_kfree_skb(skb);
595}
596EXPORT_SYMBOL(ieee80211_amsdu_to_8023s);
597
500/* Given a data frame determine the 802.1p/1d tag to use. */ 598/* Given a data frame determine the 802.1p/1d tag to use. */
501unsigned int cfg80211_classify8021d(struct sk_buff *skb) 599unsigned int cfg80211_classify8021d(struct sk_buff *skb)
502{ 600{
@@ -720,3 +818,36 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
720 818
721 return err; 819 return err;
722} 820}
821
822u16 cfg80211_calculate_bitrate(struct rate_info *rate)
823{
824 int modulation, streams, bitrate;
825
826 if (!(rate->flags & RATE_INFO_FLAGS_MCS))
827 return rate->legacy;
828
829 /* the formula below does only work for MCS values smaller than 32 */
830 if (rate->mcs >= 32)
831 return 0;
832
833 modulation = rate->mcs & 7;
834 streams = (rate->mcs >> 3) + 1;
835
836 bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ?
837 13500000 : 6500000;
838
839 if (modulation < 4)
840 bitrate *= (modulation + 1);
841 else if (modulation == 4)
842 bitrate *= (modulation + 2);
843 else
844 bitrate *= (modulation + 3);
845
846 bitrate *= streams;
847
848 if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
849 bitrate = (bitrate / 9) * 10;
850
851 /* do NOT round down here */
852 return (bitrate + 50000) / 100000;
853}
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 54face3d4424..9ab51838849e 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -1099,8 +1099,8 @@ int cfg80211_wext_siwpower(struct net_device *dev,
1099{ 1099{
1100 struct wireless_dev *wdev = dev->ieee80211_ptr; 1100 struct wireless_dev *wdev = dev->ieee80211_ptr;
1101 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 1101 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
1102 bool ps = wdev->wext.ps; 1102 bool ps = wdev->ps;
1103 int timeout = wdev->wext.ps_timeout; 1103 int timeout = wdev->ps_timeout;
1104 int err; 1104 int err;
1105 1105
1106 if (wdev->iftype != NL80211_IFTYPE_STATION) 1106 if (wdev->iftype != NL80211_IFTYPE_STATION)
@@ -1133,8 +1133,8 @@ int cfg80211_wext_siwpower(struct net_device *dev,
1133 if (err) 1133 if (err)
1134 return err; 1134 return err;
1135 1135
1136 wdev->wext.ps = ps; 1136 wdev->ps = ps;
1137 wdev->wext.ps_timeout = timeout; 1137 wdev->ps_timeout = timeout;
1138 1138
1139 return 0; 1139 return 0;
1140 1140
@@ -1147,7 +1147,7 @@ int cfg80211_wext_giwpower(struct net_device *dev,
1147{ 1147{
1148 struct wireless_dev *wdev = dev->ieee80211_ptr; 1148 struct wireless_dev *wdev = dev->ieee80211_ptr;
1149 1149
1150 wrq->disabled = !wdev->wext.ps; 1150 wrq->disabled = !wdev->ps;
1151 1151
1152 return 0; 1152 return 0;
1153} 1153}
@@ -1204,21 +1204,47 @@ int cfg80211_wext_siwrate(struct net_device *dev,
1204 struct wireless_dev *wdev = dev->ieee80211_ptr; 1204 struct wireless_dev *wdev = dev->ieee80211_ptr;
1205 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 1205 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
1206 struct cfg80211_bitrate_mask mask; 1206 struct cfg80211_bitrate_mask mask;
1207 u32 fixed, maxrate;
1208 struct ieee80211_supported_band *sband;
1209 int band, ridx;
1210 bool match = false;
1207 1211
1208 if (!rdev->ops->set_bitrate_mask) 1212 if (!rdev->ops->set_bitrate_mask)
1209 return -EOPNOTSUPP; 1213 return -EOPNOTSUPP;
1210 1214
1211 mask.fixed = 0; 1215 memset(&mask, 0, sizeof(mask));
1212 mask.maxrate = 0; 1216 fixed = 0;
1217 maxrate = (u32)-1;
1213 1218
1214 if (rate->value < 0) { 1219 if (rate->value < 0) {
1215 /* nothing */ 1220 /* nothing */
1216 } else if (rate->fixed) { 1221 } else if (rate->fixed) {
1217 mask.fixed = rate->value / 1000; /* kbps */ 1222 fixed = rate->value / 100000;
1218 } else { 1223 } else {
1219 mask.maxrate = rate->value / 1000; /* kbps */ 1224 maxrate = rate->value / 100000;
1220 } 1225 }
1221 1226
1227 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1228 sband = wdev->wiphy->bands[band];
1229 if (sband == NULL)
1230 continue;
1231 for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
1232 struct ieee80211_rate *srate = &sband->bitrates[ridx];
1233 if (fixed == srate->bitrate) {
1234 mask.control[band].legacy = 1 << ridx;
1235 match = true;
1236 break;
1237 }
1238 if (srate->bitrate <= maxrate) {
1239 mask.control[band].legacy |= 1 << ridx;
1240 match = true;
1241 }
1242 }
1243 }
1244
1245 if (!match)
1246 return -EINVAL;
1247
1222 return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask); 1248 return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask);
1223} 1249}
1224EXPORT_SYMBOL_GPL(cfg80211_wext_siwrate); 1250EXPORT_SYMBOL_GPL(cfg80211_wext_siwrate);
@@ -1257,10 +1283,7 @@ int cfg80211_wext_giwrate(struct net_device *dev,
1257 if (!(sinfo.filled & STATION_INFO_TX_BITRATE)) 1283 if (!(sinfo.filled & STATION_INFO_TX_BITRATE))
1258 return -EOPNOTSUPP; 1284 return -EOPNOTSUPP;
1259 1285
1260 rate->value = 0; 1286 rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate);
1261
1262 if (!(sinfo.txrate.flags & RATE_INFO_FLAGS_MCS))
1263 rate->value = 100000 * sinfo.txrate.legacy;
1264 1287
1265 return 0; 1288 return 0;
1266} 1289}
diff --git a/net/wireless/wext-proc.c b/net/wireless/wext-proc.c
index 273a7f77c834..8bafa31fa9f8 100644
--- a/net/wireless/wext-proc.c
+++ b/net/wireless/wext-proc.c
@@ -140,7 +140,7 @@ static const struct file_operations wireless_seq_fops = {
140 .release = seq_release_net, 140 .release = seq_release_net,
141}; 141};
142 142
143int wext_proc_init(struct net *net) 143int __net_init wext_proc_init(struct net *net)
144{ 144{
145 /* Create /proc/net/wireless entry */ 145 /* Create /proc/net/wireless entry */
146 if (!proc_net_fops_create(net, "wireless", S_IRUGO, &wireless_seq_fops)) 146 if (!proc_net_fops_create(net, "wireless", S_IRUGO, &wireless_seq_fops))
@@ -149,7 +149,7 @@ int wext_proc_init(struct net *net)
149 return 0; 149 return 0;
150} 150}
151 151
152void wext_proc_exit(struct net *net) 152void __net_exit wext_proc_exit(struct net *net)
153{ 153{
154 proc_net_remove(net, "wireless"); 154 proc_net_remove(net, "wireless");
155} 155}
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index e3219e4cd044..9796f3ed1edb 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -55,6 +55,7 @@
55#include <linux/notifier.h> 55#include <linux/notifier.h>
56#include <linux/init.h> 56#include <linux/init.h>
57#include <linux/compat.h> 57#include <linux/compat.h>
58#include <linux/ctype.h>
58 59
59#include <net/x25.h> 60#include <net/x25.h>
60#include <net/compat.h> 61#include <net/compat.h>
@@ -512,15 +513,20 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
512{ 513{
513 struct sock *sk; 514 struct sock *sk;
514 struct x25_sock *x25; 515 struct x25_sock *x25;
515 int rc = -ESOCKTNOSUPPORT; 516 int rc = -EAFNOSUPPORT;
516 517
517 if (!net_eq(net, &init_net)) 518 if (!net_eq(net, &init_net))
518 return -EAFNOSUPPORT; 519 goto out;
519 520
520 if (sock->type != SOCK_SEQPACKET || protocol) 521 rc = -ESOCKTNOSUPPORT;
522 if (sock->type != SOCK_SEQPACKET)
521 goto out; 523 goto out;
522 524
523 rc = -ENOMEM; 525 rc = -EINVAL;
526 if (protocol)
527 goto out;
528
529 rc = -ENOBUFS;
524 if ((sk = x25_alloc_socket(net)) == NULL) 530 if ((sk = x25_alloc_socket(net)) == NULL)
525 goto out; 531 goto out;
526 532
@@ -643,7 +649,7 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
643{ 649{
644 struct sock *sk = sock->sk; 650 struct sock *sk = sock->sk;
645 struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; 651 struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
646 int rc = 0; 652 int len, i, rc = 0;
647 653
648 lock_kernel(); 654 lock_kernel();
649 if (!sock_flag(sk, SOCK_ZAPPED) || 655 if (!sock_flag(sk, SOCK_ZAPPED) ||
@@ -653,6 +659,14 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
653 goto out; 659 goto out;
654 } 660 }
655 661
662 len = strlen(addr->sx25_addr.x25_addr);
663 for (i = 0; i < len; i++) {
664 if (!isdigit(addr->sx25_addr.x25_addr[i])) {
665 rc = -EINVAL;
666 goto out;
667 }
668 }
669
656 x25_sk(sk)->source_addr = addr->sx25_addr; 670 x25_sk(sk)->source_addr = addr->sx25_addr;
657 x25_insert_socket(sk); 671 x25_insert_socket(sk);
658 sock_reset_flag(sk, SOCK_ZAPPED); 672 sock_reset_flag(sk, SOCK_ZAPPED);
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index 3e1efe534645..52e304212241 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -53,7 +53,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
53 if (!sock_owned_by_user(sk)) { 53 if (!sock_owned_by_user(sk)) {
54 queued = x25_process_rx_frame(sk, skb); 54 queued = x25_process_rx_frame(sk, skb);
55 } else { 55 } else {
56 sk_add_backlog(sk, skb); 56 queued = !sk_add_backlog(sk, skb);
57 } 57 }
58 bh_unlock_sock(sk); 58 bh_unlock_sock(sk);
59 sock_put(sk); 59 sock_put(sk);
diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c
index 0a04e62e0e18..7ff373792324 100644
--- a/net/x25/x25_proc.c
+++ b/net/x25/x25_proc.c
@@ -25,49 +25,17 @@
25#include <net/x25.h> 25#include <net/x25.h>
26 26
27#ifdef CONFIG_PROC_FS 27#ifdef CONFIG_PROC_FS
28static __inline__ struct x25_route *x25_get_route_idx(loff_t pos)
29{
30 struct list_head *route_entry;
31 struct x25_route *rt = NULL;
32
33 list_for_each(route_entry, &x25_route_list) {
34 rt = list_entry(route_entry, struct x25_route, node);
35 if (!pos--)
36 goto found;
37 }
38 rt = NULL;
39found:
40 return rt;
41}
42 28
43static void *x25_seq_route_start(struct seq_file *seq, loff_t *pos) 29static void *x25_seq_route_start(struct seq_file *seq, loff_t *pos)
44 __acquires(x25_route_list_lock) 30 __acquires(x25_route_list_lock)
45{ 31{
46 loff_t l = *pos;
47
48 read_lock_bh(&x25_route_list_lock); 32 read_lock_bh(&x25_route_list_lock);
49 return l ? x25_get_route_idx(--l) : SEQ_START_TOKEN; 33 return seq_list_start_head(&x25_route_list, *pos);
50} 34}
51 35
52static void *x25_seq_route_next(struct seq_file *seq, void *v, loff_t *pos) 36static void *x25_seq_route_next(struct seq_file *seq, void *v, loff_t *pos)
53{ 37{
54 struct x25_route *rt; 38 return seq_list_next(v, &x25_route_list, pos);
55
56 ++*pos;
57 if (v == SEQ_START_TOKEN) {
58 rt = NULL;
59 if (!list_empty(&x25_route_list))
60 rt = list_entry(x25_route_list.next,
61 struct x25_route, node);
62 goto out;
63 }
64 rt = v;
65 if (rt->node.next != &x25_route_list)
66 rt = list_entry(rt->node.next, struct x25_route, node);
67 else
68 rt = NULL;
69out:
70 return rt;
71} 39}
72 40
73static void x25_seq_route_stop(struct seq_file *seq, void *v) 41static void x25_seq_route_stop(struct seq_file *seq, void *v)
@@ -78,9 +46,9 @@ static void x25_seq_route_stop(struct seq_file *seq, void *v)
78 46
79static int x25_seq_route_show(struct seq_file *seq, void *v) 47static int x25_seq_route_show(struct seq_file *seq, void *v)
80{ 48{
81 struct x25_route *rt; 49 struct x25_route *rt = list_entry(v, struct x25_route, node);
82 50
83 if (v == SEQ_START_TOKEN) { 51 if (v == &x25_route_list) {
84 seq_puts(seq, "Address Digits Device\n"); 52 seq_puts(seq, "Address Digits Device\n");
85 goto out; 53 goto out;
86 } 54 }
@@ -93,40 +61,16 @@ out:
93 return 0; 61 return 0;
94} 62}
95 63
96static __inline__ struct sock *x25_get_socket_idx(loff_t pos)
97{
98 struct sock *s;
99 struct hlist_node *node;
100
101 sk_for_each(s, node, &x25_list)
102 if (!pos--)
103 goto found;
104 s = NULL;
105found:
106 return s;
107}
108
109static void *x25_seq_socket_start(struct seq_file *seq, loff_t *pos) 64static void *x25_seq_socket_start(struct seq_file *seq, loff_t *pos)
110 __acquires(x25_list_lock) 65 __acquires(x25_list_lock)
111{ 66{
112 loff_t l = *pos;
113
114 read_lock_bh(&x25_list_lock); 67 read_lock_bh(&x25_list_lock);
115 return l ? x25_get_socket_idx(--l) : SEQ_START_TOKEN; 68 return seq_hlist_start_head(&x25_list, *pos);
116} 69}
117 70
118static void *x25_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos) 71static void *x25_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
119{ 72{
120 struct sock *s; 73 return seq_hlist_next(v, &x25_list, pos);
121
122 ++*pos;
123 if (v == SEQ_START_TOKEN) {
124 s = sk_head(&x25_list);
125 goto out;
126 }
127 s = sk_next(v);
128out:
129 return s;
130} 74}
131 75
132static void x25_seq_socket_stop(struct seq_file *seq, void *v) 76static void x25_seq_socket_stop(struct seq_file *seq, void *v)
@@ -148,7 +92,7 @@ static int x25_seq_socket_show(struct seq_file *seq, void *v)
148 goto out; 92 goto out;
149 } 93 }
150 94
151 s = v; 95 s = sk_entry(v);
152 x25 = x25_sk(s); 96 x25 = x25_sk(s);
153 97
154 if (!x25->neighbour || (dev = x25->neighbour->dev) == NULL) 98 if (!x25->neighbour || (dev = x25->neighbour->dev) == NULL)
@@ -170,51 +114,16 @@ out:
170 return 0; 114 return 0;
171} 115}
172 116
173static __inline__ struct x25_forward *x25_get_forward_idx(loff_t pos)
174{
175 struct x25_forward *f;
176 struct list_head *entry;
177
178 list_for_each(entry, &x25_forward_list) {
179 f = list_entry(entry, struct x25_forward, node);
180 if (!pos--)
181 goto found;
182 }
183
184 f = NULL;
185found:
186 return f;
187}
188
189static void *x25_seq_forward_start(struct seq_file *seq, loff_t *pos) 117static void *x25_seq_forward_start(struct seq_file *seq, loff_t *pos)
190 __acquires(x25_forward_list_lock) 118 __acquires(x25_forward_list_lock)
191{ 119{
192 loff_t l = *pos;
193
194 read_lock_bh(&x25_forward_list_lock); 120 read_lock_bh(&x25_forward_list_lock);
195 return l ? x25_get_forward_idx(--l) : SEQ_START_TOKEN; 121 return seq_list_start_head(&x25_forward_list, *pos);
196} 122}
197 123
198static void *x25_seq_forward_next(struct seq_file *seq, void *v, loff_t *pos) 124static void *x25_seq_forward_next(struct seq_file *seq, void *v, loff_t *pos)
199{ 125{
200 struct x25_forward *f; 126 return seq_list_next(v, &x25_forward_list, pos);
201
202 ++*pos;
203 if (v == SEQ_START_TOKEN) {
204 f = NULL;
205 if (!list_empty(&x25_forward_list))
206 f = list_entry(x25_forward_list.next,
207 struct x25_forward, node);
208 goto out;
209 }
210 f = v;
211 if (f->node.next != &x25_forward_list)
212 f = list_entry(f->node.next, struct x25_forward, node);
213 else
214 f = NULL;
215out:
216 return f;
217
218} 127}
219 128
220static void x25_seq_forward_stop(struct seq_file *seq, void *v) 129static void x25_seq_forward_stop(struct seq_file *seq, void *v)
@@ -225,9 +134,9 @@ static void x25_seq_forward_stop(struct seq_file *seq, void *v)
225 134
226static int x25_seq_forward_show(struct seq_file *seq, void *v) 135static int x25_seq_forward_show(struct seq_file *seq, void *v)
227{ 136{
228 struct x25_forward *f; 137 struct x25_forward *f = list_entry(v, struct x25_forward, node);
229 138
230 if (v == SEQ_START_TOKEN) { 139 if (v == &x25_forward_list) {
231 seq_printf(seq, "lci dev1 dev2\n"); 140 seq_printf(seq, "lci dev1 dev2\n");
232 goto out; 141 goto out;
233 } 142 }
@@ -236,7 +145,6 @@ static int x25_seq_forward_show(struct seq_file *seq, void *v)
236 145
237 seq_printf(seq, "%d %-10s %-10s\n", 146 seq_printf(seq, "%d %-10s %-10s\n",
238 f->lci, f->dev1->name, f->dev2->name); 147 f->lci, f->dev1->name, f->dev2->name);
239
240out: 148out:
241 return 0; 149 return 0;
242} 150}
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 743c0134a6a9..8b4d6e3246e5 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -125,6 +125,22 @@ static struct xfrm_algo_desc aead_list[] = {
125 .sadb_alg_maxbits = 256 125 .sadb_alg_maxbits = 256
126 } 126 }
127}, 127},
128{
129 .name = "rfc4543(gcm(aes))",
130
131 .uinfo = {
132 .aead = {
133 .icv_truncbits = 128,
134 }
135 },
136
137 .desc = {
138 .sadb_alg_id = SADB_X_EALG_NULL_AES_GMAC,
139 .sadb_alg_ivlen = 8,
140 .sadb_alg_minbits = 128,
141 .sadb_alg_maxbits = 256
142 }
143},
128}; 144};
129 145
130static struct xfrm_algo_desc aalg_list[] = { 146static struct xfrm_algo_desc aalg_list[] = {
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index e0009c17d809..45f1c98d4fce 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -152,7 +152,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
152 goto drop; 152 goto drop;
153 } 153 }
154 154
155 x = xfrm_state_lookup(net, daddr, spi, nexthdr, family); 155 x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family);
156 if (x == NULL) { 156 if (x == NULL) {
157 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES); 157 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
158 xfrm_audit_state_notfound(skb, family, spi, seq); 158 xfrm_audit_state_notfound(skb, family, spi, seq);
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index 42cd18391f46..0fc5ff66d1fa 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -30,12 +30,12 @@
30 30
31struct ipcomp_tfms { 31struct ipcomp_tfms {
32 struct list_head list; 32 struct list_head list;
33 struct crypto_comp **tfms; 33 struct crypto_comp * __percpu *tfms;
34 int users; 34 int users;
35}; 35};
36 36
37static DEFINE_MUTEX(ipcomp_resource_mutex); 37static DEFINE_MUTEX(ipcomp_resource_mutex);
38static void **ipcomp_scratches; 38static void * __percpu *ipcomp_scratches;
39static int ipcomp_scratch_users; 39static int ipcomp_scratch_users;
40static LIST_HEAD(ipcomp_tfms_list); 40static LIST_HEAD(ipcomp_tfms_list);
41 41
@@ -200,7 +200,7 @@ EXPORT_SYMBOL_GPL(ipcomp_output);
200static void ipcomp_free_scratches(void) 200static void ipcomp_free_scratches(void)
201{ 201{
202 int i; 202 int i;
203 void **scratches; 203 void * __percpu *scratches;
204 204
205 if (--ipcomp_scratch_users) 205 if (--ipcomp_scratch_users)
206 return; 206 return;
@@ -215,10 +215,10 @@ static void ipcomp_free_scratches(void)
215 free_percpu(scratches); 215 free_percpu(scratches);
216} 216}
217 217
218static void **ipcomp_alloc_scratches(void) 218static void * __percpu *ipcomp_alloc_scratches(void)
219{ 219{
220 int i; 220 int i;
221 void **scratches; 221 void * __percpu *scratches;
222 222
223 if (ipcomp_scratch_users++) 223 if (ipcomp_scratch_users++)
224 return ipcomp_scratches; 224 return ipcomp_scratches;
@@ -239,7 +239,7 @@ static void **ipcomp_alloc_scratches(void)
239 return scratches; 239 return scratches;
240} 240}
241 241
242static void ipcomp_free_tfms(struct crypto_comp **tfms) 242static void ipcomp_free_tfms(struct crypto_comp * __percpu *tfms)
243{ 243{
244 struct ipcomp_tfms *pos; 244 struct ipcomp_tfms *pos;
245 int cpu; 245 int cpu;
@@ -267,10 +267,10 @@ static void ipcomp_free_tfms(struct crypto_comp **tfms)
267 free_percpu(tfms); 267 free_percpu(tfms);
268} 268}
269 269
270static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name) 270static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
271{ 271{
272 struct ipcomp_tfms *pos; 272 struct ipcomp_tfms *pos;
273 struct crypto_comp **tfms; 273 struct crypto_comp * __percpu *tfms;
274 int cpu; 274 int cpu;
275 275
276 /* This can be any valid CPU ID so we don't need locking. */ 276 /* This can be any valid CPU ID so we don't need locking. */
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index cb81ca35b0d6..843e066649cb 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -469,16 +469,16 @@ static inline int xfrm_byidx_should_resize(struct net *net, int total)
469 return 0; 469 return 0;
470} 470}
471 471
472void xfrm_spd_getinfo(struct xfrmk_spdinfo *si) 472void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
473{ 473{
474 read_lock_bh(&xfrm_policy_lock); 474 read_lock_bh(&xfrm_policy_lock);
475 si->incnt = init_net.xfrm.policy_count[XFRM_POLICY_IN]; 475 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
476 si->outcnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT]; 476 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
477 si->fwdcnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD]; 477 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
478 si->inscnt = init_net.xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX]; 478 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
479 si->outscnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX]; 479 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
480 si->fwdscnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX]; 480 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
481 si->spdhcnt = init_net.xfrm.policy_idx_hmask; 481 si->spdhcnt = net->xfrm.policy_idx_hmask;
482 si->spdhmcnt = xfrm_policy_hashmax; 482 si->spdhmcnt = xfrm_policy_hashmax;
483 read_unlock_bh(&xfrm_policy_lock); 483 read_unlock_bh(&xfrm_policy_lock);
484} 484}
@@ -556,6 +556,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
556 struct hlist_head *chain; 556 struct hlist_head *chain;
557 struct hlist_node *entry, *newpos; 557 struct hlist_node *entry, *newpos;
558 struct dst_entry *gc_list; 558 struct dst_entry *gc_list;
559 u32 mark = policy->mark.v & policy->mark.m;
559 560
560 write_lock_bh(&xfrm_policy_lock); 561 write_lock_bh(&xfrm_policy_lock);
561 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); 562 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
@@ -564,6 +565,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
564 hlist_for_each_entry(pol, entry, chain, bydst) { 565 hlist_for_each_entry(pol, entry, chain, bydst) {
565 if (pol->type == policy->type && 566 if (pol->type == policy->type &&
566 !selector_cmp(&pol->selector, &policy->selector) && 567 !selector_cmp(&pol->selector, &policy->selector) &&
568 (mark & pol->mark.m) == pol->mark.v &&
567 xfrm_sec_ctx_match(pol->security, policy->security) && 569 xfrm_sec_ctx_match(pol->security, policy->security) &&
568 !WARN_ON(delpol)) { 570 !WARN_ON(delpol)) {
569 if (excl) { 571 if (excl) {
@@ -635,8 +637,8 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
635} 637}
636EXPORT_SYMBOL(xfrm_policy_insert); 638EXPORT_SYMBOL(xfrm_policy_insert);
637 639
638struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir, 640struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
639 struct xfrm_selector *sel, 641 int dir, struct xfrm_selector *sel,
640 struct xfrm_sec_ctx *ctx, int delete, 642 struct xfrm_sec_ctx *ctx, int delete,
641 int *err) 643 int *err)
642{ 644{
@@ -650,6 +652,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
650 ret = NULL; 652 ret = NULL;
651 hlist_for_each_entry(pol, entry, chain, bydst) { 653 hlist_for_each_entry(pol, entry, chain, bydst) {
652 if (pol->type == type && 654 if (pol->type == type &&
655 (mark & pol->mark.m) == pol->mark.v &&
653 !selector_cmp(sel, &pol->selector) && 656 !selector_cmp(sel, &pol->selector) &&
654 xfrm_sec_ctx_match(ctx, pol->security)) { 657 xfrm_sec_ctx_match(ctx, pol->security)) {
655 xfrm_pol_hold(pol); 658 xfrm_pol_hold(pol);
@@ -676,8 +679,8 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
676} 679}
677EXPORT_SYMBOL(xfrm_policy_bysel_ctx); 680EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
678 681
679struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id, 682struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
680 int delete, int *err) 683 int dir, u32 id, int delete, int *err)
681{ 684{
682 struct xfrm_policy *pol, *ret; 685 struct xfrm_policy *pol, *ret;
683 struct hlist_head *chain; 686 struct hlist_head *chain;
@@ -692,7 +695,8 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id,
692 chain = net->xfrm.policy_byidx + idx_hash(net, id); 695 chain = net->xfrm.policy_byidx + idx_hash(net, id);
693 ret = NULL; 696 ret = NULL;
694 hlist_for_each_entry(pol, entry, chain, byidx) { 697 hlist_for_each_entry(pol, entry, chain, byidx) {
695 if (pol->type == type && pol->index == id) { 698 if (pol->type == type && pol->index == id &&
699 (mark & pol->mark.m) == pol->mark.v) {
696 xfrm_pol_hold(pol); 700 xfrm_pol_hold(pol);
697 if (delete) { 701 if (delete) {
698 *err = security_xfrm_policy_delete( 702 *err = security_xfrm_policy_delete(
@@ -771,7 +775,8 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
771 775
772int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) 776int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
773{ 777{
774 int dir, err = 0; 778 int dir, err = 0, cnt = 0;
779 struct xfrm_policy *dp;
775 780
776 write_lock_bh(&xfrm_policy_lock); 781 write_lock_bh(&xfrm_policy_lock);
777 782
@@ -789,8 +794,10 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
789 &net->xfrm.policy_inexact[dir], bydst) { 794 &net->xfrm.policy_inexact[dir], bydst) {
790 if (pol->type != type) 795 if (pol->type != type)
791 continue; 796 continue;
792 __xfrm_policy_unlink(pol, dir); 797 dp = __xfrm_policy_unlink(pol, dir);
793 write_unlock_bh(&xfrm_policy_lock); 798 write_unlock_bh(&xfrm_policy_lock);
799 if (dp)
800 cnt++;
794 801
795 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, 802 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
796 audit_info->sessionid, 803 audit_info->sessionid,
@@ -809,8 +816,10 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
809 bydst) { 816 bydst) {
810 if (pol->type != type) 817 if (pol->type != type)
811 continue; 818 continue;
812 __xfrm_policy_unlink(pol, dir); 819 dp = __xfrm_policy_unlink(pol, dir);
813 write_unlock_bh(&xfrm_policy_lock); 820 write_unlock_bh(&xfrm_policy_lock);
821 if (dp)
822 cnt++;
814 823
815 xfrm_audit_policy_delete(pol, 1, 824 xfrm_audit_policy_delete(pol, 1,
816 audit_info->loginuid, 825 audit_info->loginuid,
@@ -824,6 +833,8 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
824 } 833 }
825 834
826 } 835 }
836 if (!cnt)
837 err = -ESRCH;
827 atomic_inc(&flow_cache_genid); 838 atomic_inc(&flow_cache_genid);
828out: 839out:
829 write_unlock_bh(&xfrm_policy_lock); 840 write_unlock_bh(&xfrm_policy_lock);
@@ -909,6 +920,7 @@ static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
909 int match, ret = -ESRCH; 920 int match, ret = -ESRCH;
910 921
911 if (pol->family != family || 922 if (pol->family != family ||
923 (fl->mark & pol->mark.m) != pol->mark.v ||
912 pol->type != type) 924 pol->type != type)
913 return ret; 925 return ret;
914 926
@@ -1033,6 +1045,10 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struc
1033 int err = 0; 1045 int err = 0;
1034 1046
1035 if (match) { 1047 if (match) {
1048 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
1049 pol = NULL;
1050 goto out;
1051 }
1036 err = security_xfrm_policy_lookup(pol->security, 1052 err = security_xfrm_policy_lookup(pol->security,
1037 fl->secid, 1053 fl->secid,
1038 policy_to_flow_dir(dir)); 1054 policy_to_flow_dir(dir));
@@ -1045,6 +1061,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struc
1045 } else 1061 } else
1046 pol = NULL; 1062 pol = NULL;
1047 } 1063 }
1064out:
1048 read_unlock_bh(&xfrm_policy_lock); 1065 read_unlock_bh(&xfrm_policy_lock);
1049 return pol; 1066 return pol;
1050} 1067}
@@ -1137,6 +1154,7 @@ static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
1137 } 1154 }
1138 newp->lft = old->lft; 1155 newp->lft = old->lft;
1139 newp->curlft = old->curlft; 1156 newp->curlft = old->curlft;
1157 newp->mark = old->mark;
1140 newp->action = old->action; 1158 newp->action = old->action;
1141 newp->flags = old->flags; 1159 newp->flags = old->flags;
1142 newp->xfrm_nr = old->xfrm_nr; 1160 newp->xfrm_nr = old->xfrm_nr;
@@ -1309,15 +1327,28 @@ static inline int xfrm_get_tos(struct flowi *fl, int family)
1309 return tos; 1327 return tos;
1310} 1328}
1311 1329
1312static inline struct xfrm_dst *xfrm_alloc_dst(int family) 1330static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1313{ 1331{
1314 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1332 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1333 struct dst_ops *dst_ops;
1315 struct xfrm_dst *xdst; 1334 struct xfrm_dst *xdst;
1316 1335
1317 if (!afinfo) 1336 if (!afinfo)
1318 return ERR_PTR(-EINVAL); 1337 return ERR_PTR(-EINVAL);
1319 1338
1320 xdst = dst_alloc(afinfo->dst_ops) ?: ERR_PTR(-ENOBUFS); 1339 switch (family) {
1340 case AF_INET:
1341 dst_ops = &net->xfrm.xfrm4_dst_ops;
1342 break;
1343#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1344 case AF_INET6:
1345 dst_ops = &net->xfrm.xfrm6_dst_ops;
1346 break;
1347#endif
1348 default:
1349 BUG();
1350 }
1351 xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS);
1321 1352
1322 xfrm_policy_put_afinfo(afinfo); 1353 xfrm_policy_put_afinfo(afinfo);
1323 1354
@@ -1341,7 +1372,8 @@ static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
1341 return err; 1372 return err;
1342} 1373}
1343 1374
1344static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev) 1375static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1376 struct flowi *fl)
1345{ 1377{
1346 struct xfrm_policy_afinfo *afinfo = 1378 struct xfrm_policy_afinfo *afinfo =
1347 xfrm_policy_get_afinfo(xdst->u.dst.ops->family); 1379 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
@@ -1350,7 +1382,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
1350 if (!afinfo) 1382 if (!afinfo)
1351 return -EINVAL; 1383 return -EINVAL;
1352 1384
1353 err = afinfo->fill_dst(xdst, dev); 1385 err = afinfo->fill_dst(xdst, dev, fl);
1354 1386
1355 xfrm_policy_put_afinfo(afinfo); 1387 xfrm_policy_put_afinfo(afinfo);
1356 1388
@@ -1366,6 +1398,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1366 struct flowi *fl, 1398 struct flowi *fl,
1367 struct dst_entry *dst) 1399 struct dst_entry *dst)
1368{ 1400{
1401 struct net *net = xp_net(policy);
1369 unsigned long now = jiffies; 1402 unsigned long now = jiffies;
1370 struct net_device *dev; 1403 struct net_device *dev;
1371 struct dst_entry *dst_prev = NULL; 1404 struct dst_entry *dst_prev = NULL;
@@ -1389,7 +1422,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1389 dst_hold(dst); 1422 dst_hold(dst);
1390 1423
1391 for (; i < nx; i++) { 1424 for (; i < nx; i++) {
1392 struct xfrm_dst *xdst = xfrm_alloc_dst(family); 1425 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
1393 struct dst_entry *dst1 = &xdst->u.dst; 1426 struct dst_entry *dst1 = &xdst->u.dst;
1394 1427
1395 err = PTR_ERR(xdst); 1428 err = PTR_ERR(xdst);
@@ -1445,7 +1478,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1445 if (!dev) 1478 if (!dev)
1446 goto free_dst; 1479 goto free_dst;
1447 1480
1448 /* Copy neighbout for reachability confirmation */ 1481 /* Copy neighbour for reachability confirmation */
1449 dst0->neighbour = neigh_clone(dst->neighbour); 1482 dst0->neighbour = neigh_clone(dst->neighbour);
1450 1483
1451 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len); 1484 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
@@ -1454,7 +1487,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1454 for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) { 1487 for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
1455 struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev; 1488 struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
1456 1489
1457 err = xfrm_fill_dst(xdst, dev); 1490 err = xfrm_fill_dst(xdst, dev, fl);
1458 if (err) 1491 if (err)
1459 goto free_dst; 1492 goto free_dst;
1460 1493
@@ -2031,8 +2064,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2031 int res; 2064 int res;
2032 2065
2033 if (xfrm_decode_session(skb, &fl, family) < 0) { 2066 if (xfrm_decode_session(skb, &fl, family) < 0) {
2034 /* XXX: we should have something like FWDHDRERROR here. */ 2067 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2035 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2036 return 0; 2068 return 0;
2037 } 2069 }
2038 2070
@@ -2279,6 +2311,7 @@ EXPORT_SYMBOL(xfrm_bundle_ok);
2279 2311
2280int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) 2312int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2281{ 2313{
2314 struct net *net;
2282 int err = 0; 2315 int err = 0;
2283 if (unlikely(afinfo == NULL)) 2316 if (unlikely(afinfo == NULL))
2284 return -EINVAL; 2317 return -EINVAL;
@@ -2302,6 +2335,27 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2302 xfrm_policy_afinfo[afinfo->family] = afinfo; 2335 xfrm_policy_afinfo[afinfo->family] = afinfo;
2303 } 2336 }
2304 write_unlock_bh(&xfrm_policy_afinfo_lock); 2337 write_unlock_bh(&xfrm_policy_afinfo_lock);
2338
2339 rtnl_lock();
2340 for_each_net(net) {
2341 struct dst_ops *xfrm_dst_ops;
2342
2343 switch (afinfo->family) {
2344 case AF_INET:
2345 xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
2346 break;
2347#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2348 case AF_INET6:
2349 xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
2350 break;
2351#endif
2352 default:
2353 BUG();
2354 }
2355 *xfrm_dst_ops = *afinfo->dst_ops;
2356 }
2357 rtnl_unlock();
2358
2305 return err; 2359 return err;
2306} 2360}
2307EXPORT_SYMBOL(xfrm_policy_register_afinfo); 2361EXPORT_SYMBOL(xfrm_policy_register_afinfo);
@@ -2332,6 +2386,22 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
2332} 2386}
2333EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); 2387EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2334 2388
2389static void __net_init xfrm_dst_ops_init(struct net *net)
2390{
2391 struct xfrm_policy_afinfo *afinfo;
2392
2393 read_lock_bh(&xfrm_policy_afinfo_lock);
2394 afinfo = xfrm_policy_afinfo[AF_INET];
2395 if (afinfo)
2396 net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
2397#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2398 afinfo = xfrm_policy_afinfo[AF_INET6];
2399 if (afinfo)
2400 net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
2401#endif
2402 read_unlock_bh(&xfrm_policy_afinfo_lock);
2403}
2404
2335static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) 2405static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
2336{ 2406{
2337 struct xfrm_policy_afinfo *afinfo; 2407 struct xfrm_policy_afinfo *afinfo;
@@ -2369,19 +2439,19 @@ static int __net_init xfrm_statistics_init(struct net *net)
2369{ 2439{
2370 int rv; 2440 int rv;
2371 2441
2372 if (snmp_mib_init((void **)net->mib.xfrm_statistics, 2442 if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
2373 sizeof(struct linux_xfrm_mib)) < 0) 2443 sizeof(struct linux_xfrm_mib)) < 0)
2374 return -ENOMEM; 2444 return -ENOMEM;
2375 rv = xfrm_proc_init(net); 2445 rv = xfrm_proc_init(net);
2376 if (rv < 0) 2446 if (rv < 0)
2377 snmp_mib_free((void **)net->mib.xfrm_statistics); 2447 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2378 return rv; 2448 return rv;
2379} 2449}
2380 2450
2381static void xfrm_statistics_fini(struct net *net) 2451static void xfrm_statistics_fini(struct net *net)
2382{ 2452{
2383 xfrm_proc_fini(net); 2453 xfrm_proc_fini(net);
2384 snmp_mib_free((void **)net->mib.xfrm_statistics); 2454 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2385} 2455}
2386#else 2456#else
2387static int __net_init xfrm_statistics_init(struct net *net) 2457static int __net_init xfrm_statistics_init(struct net *net)
@@ -2494,6 +2564,7 @@ static int __net_init xfrm_net_init(struct net *net)
2494 rv = xfrm_policy_init(net); 2564 rv = xfrm_policy_init(net);
2495 if (rv < 0) 2565 if (rv < 0)
2496 goto out_policy; 2566 goto out_policy;
2567 xfrm_dst_ops_init(net);
2497 rv = xfrm_sysctl_init(net); 2568 rv = xfrm_sysctl_init(net);
2498 if (rv < 0) 2569 if (rv < 0)
2499 goto out_sysctl; 2570 goto out_sysctl;
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index fef8db553e8d..58d9ae005597 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -15,7 +15,7 @@
15#include <net/snmp.h> 15#include <net/snmp.h>
16#include <net/xfrm.h> 16#include <net/xfrm.h>
17 17
18static struct snmp_mib xfrm_mib_list[] = { 18static const struct snmp_mib xfrm_mib_list[] = {
19 SNMP_MIB_ITEM("XfrmInError", LINUX_MIB_XFRMINERROR), 19 SNMP_MIB_ITEM("XfrmInError", LINUX_MIB_XFRMINERROR),
20 SNMP_MIB_ITEM("XfrmInBufferError", LINUX_MIB_XFRMINBUFFERERROR), 20 SNMP_MIB_ITEM("XfrmInBufferError", LINUX_MIB_XFRMINBUFFERERROR),
21 SNMP_MIB_ITEM("XfrmInHdrError", LINUX_MIB_XFRMINHDRERROR), 21 SNMP_MIB_ITEM("XfrmInHdrError", LINUX_MIB_XFRMINHDRERROR),
@@ -41,6 +41,7 @@ static struct snmp_mib xfrm_mib_list[] = {
41 SNMP_MIB_ITEM("XfrmOutPolBlock", LINUX_MIB_XFRMOUTPOLBLOCK), 41 SNMP_MIB_ITEM("XfrmOutPolBlock", LINUX_MIB_XFRMOUTPOLBLOCK),
42 SNMP_MIB_ITEM("XfrmOutPolDead", LINUX_MIB_XFRMOUTPOLDEAD), 42 SNMP_MIB_ITEM("XfrmOutPolDead", LINUX_MIB_XFRMOUTPOLDEAD),
43 SNMP_MIB_ITEM("XfrmOutPolError", LINUX_MIB_XFRMOUTPOLERROR), 43 SNMP_MIB_ITEM("XfrmOutPolError", LINUX_MIB_XFRMOUTPOLERROR),
44 SNMP_MIB_ITEM("XfrmFwdHdrError", LINUX_MIB_XFRMFWDHDRERROR),
44 SNMP_MIB_SENTINEL 45 SNMP_MIB_SENTINEL
45}; 46};
46 47
@@ -50,7 +51,8 @@ static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
50 int i; 51 int i;
51 for (i=0; xfrm_mib_list[i].name; i++) 52 for (i=0; xfrm_mib_list[i].name; i++)
52 seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name, 53 seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name,
53 snmp_fold_field((void **)net->mib.xfrm_statistics, 54 snmp_fold_field((void __percpu **)
55 net->mib.xfrm_statistics,
54 xfrm_mib_list[i].entry)); 56 xfrm_mib_list[i].entry));
55 return 0; 57 return 0;
56} 58}
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index d847f1a52b44..17d5b96f2fc8 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -603,13 +603,14 @@ xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audi
603 603
604int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info) 604int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info)
605{ 605{
606 int i, err = 0; 606 int i, err = 0, cnt = 0;
607 607
608 spin_lock_bh(&xfrm_state_lock); 608 spin_lock_bh(&xfrm_state_lock);
609 err = xfrm_state_flush_secctx_check(net, proto, audit_info); 609 err = xfrm_state_flush_secctx_check(net, proto, audit_info);
610 if (err) 610 if (err)
611 goto out; 611 goto out;
612 612
613 err = -ESRCH;
613 for (i = 0; i <= net->xfrm.state_hmask; i++) { 614 for (i = 0; i <= net->xfrm.state_hmask; i++) {
614 struct hlist_node *entry; 615 struct hlist_node *entry;
615 struct xfrm_state *x; 616 struct xfrm_state *x;
@@ -626,13 +627,16 @@ restart:
626 audit_info->sessionid, 627 audit_info->sessionid,
627 audit_info->secid); 628 audit_info->secid);
628 xfrm_state_put(x); 629 xfrm_state_put(x);
630 if (!err)
631 cnt++;
629 632
630 spin_lock_bh(&xfrm_state_lock); 633 spin_lock_bh(&xfrm_state_lock);
631 goto restart; 634 goto restart;
632 } 635 }
633 } 636 }
634 } 637 }
635 err = 0; 638 if (cnt)
639 err = 0;
636 640
637out: 641out:
638 spin_unlock_bh(&xfrm_state_lock); 642 spin_unlock_bh(&xfrm_state_lock);
@@ -641,11 +645,11 @@ out:
641} 645}
642EXPORT_SYMBOL(xfrm_state_flush); 646EXPORT_SYMBOL(xfrm_state_flush);
643 647
644void xfrm_sad_getinfo(struct xfrmk_sadinfo *si) 648void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
645{ 649{
646 spin_lock_bh(&xfrm_state_lock); 650 spin_lock_bh(&xfrm_state_lock);
647 si->sadcnt = init_net.xfrm.state_num; 651 si->sadcnt = net->xfrm.state_num;
648 si->sadhcnt = init_net.xfrm.state_hmask; 652 si->sadhcnt = net->xfrm.state_hmask;
649 si->sadhmcnt = xfrm_state_hashmax; 653 si->sadhmcnt = xfrm_state_hashmax;
650 spin_unlock_bh(&xfrm_state_lock); 654 spin_unlock_bh(&xfrm_state_lock);
651} 655}
@@ -665,7 +669,7 @@ xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
665 return 0; 669 return 0;
666} 670}
667 671
668static struct xfrm_state *__xfrm_state_lookup(struct net *net, xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family) 672static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark, xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
669{ 673{
670 unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family); 674 unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
671 struct xfrm_state *x; 675 struct xfrm_state *x;
@@ -678,6 +682,8 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, xfrm_address_t *d
678 xfrm_addr_cmp(&x->id.daddr, daddr, family)) 682 xfrm_addr_cmp(&x->id.daddr, daddr, family))
679 continue; 683 continue;
680 684
685 if ((mark & x->mark.m) != x->mark.v)
686 continue;
681 xfrm_state_hold(x); 687 xfrm_state_hold(x);
682 return x; 688 return x;
683 } 689 }
@@ -685,7 +691,7 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, xfrm_address_t *d
685 return NULL; 691 return NULL;
686} 692}
687 693
688static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family) 694static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark, xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
689{ 695{
690 unsigned int h = xfrm_src_hash(net, daddr, saddr, family); 696 unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
691 struct xfrm_state *x; 697 struct xfrm_state *x;
@@ -698,6 +704,8 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, xfrm_addre
698 xfrm_addr_cmp(&x->props.saddr, saddr, family)) 704 xfrm_addr_cmp(&x->props.saddr, saddr, family))
699 continue; 705 continue;
700 706
707 if ((mark & x->mark.m) != x->mark.v)
708 continue;
701 xfrm_state_hold(x); 709 xfrm_state_hold(x);
702 return x; 710 return x;
703 } 711 }
@@ -709,12 +717,14 @@ static inline struct xfrm_state *
709__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family) 717__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
710{ 718{
711 struct net *net = xs_net(x); 719 struct net *net = xs_net(x);
720 u32 mark = x->mark.v & x->mark.m;
712 721
713 if (use_spi) 722 if (use_spi)
714 return __xfrm_state_lookup(net, &x->id.daddr, x->id.spi, 723 return __xfrm_state_lookup(net, mark, &x->id.daddr,
715 x->id.proto, family); 724 x->id.spi, x->id.proto, family);
716 else 725 else
717 return __xfrm_state_lookup_byaddr(net, &x->id.daddr, 726 return __xfrm_state_lookup_byaddr(net, mark,
727 &x->id.daddr,
718 &x->props.saddr, 728 &x->props.saddr,
719 x->id.proto, family); 729 x->id.proto, family);
720} 730}
@@ -779,6 +789,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
779 int acquire_in_progress = 0; 789 int acquire_in_progress = 0;
780 int error = 0; 790 int error = 0;
781 struct xfrm_state *best = NULL; 791 struct xfrm_state *best = NULL;
792 u32 mark = pol->mark.v & pol->mark.m;
782 793
783 to_put = NULL; 794 to_put = NULL;
784 795
@@ -787,6 +798,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
787 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 798 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
788 if (x->props.family == family && 799 if (x->props.family == family &&
789 x->props.reqid == tmpl->reqid && 800 x->props.reqid == tmpl->reqid &&
801 (mark & x->mark.m) == x->mark.v &&
790 !(x->props.flags & XFRM_STATE_WILDRECV) && 802 !(x->props.flags & XFRM_STATE_WILDRECV) &&
791 xfrm_state_addr_check(x, daddr, saddr, family) && 803 xfrm_state_addr_check(x, daddr, saddr, family) &&
792 tmpl->mode == x->props.mode && 804 tmpl->mode == x->props.mode &&
@@ -802,6 +814,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
802 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) { 814 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
803 if (x->props.family == family && 815 if (x->props.family == family &&
804 x->props.reqid == tmpl->reqid && 816 x->props.reqid == tmpl->reqid &&
817 (mark & x->mark.m) == x->mark.v &&
805 !(x->props.flags & XFRM_STATE_WILDRECV) && 818 !(x->props.flags & XFRM_STATE_WILDRECV) &&
806 xfrm_state_addr_check(x, daddr, saddr, family) && 819 xfrm_state_addr_check(x, daddr, saddr, family) &&
807 tmpl->mode == x->props.mode && 820 tmpl->mode == x->props.mode &&
@@ -815,7 +828,7 @@ found:
815 x = best; 828 x = best;
816 if (!x && !error && !acquire_in_progress) { 829 if (!x && !error && !acquire_in_progress) {
817 if (tmpl->id.spi && 830 if (tmpl->id.spi &&
818 (x0 = __xfrm_state_lookup(net, daddr, tmpl->id.spi, 831 (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
819 tmpl->id.proto, family)) != NULL) { 832 tmpl->id.proto, family)) != NULL) {
820 to_put = x0; 833 to_put = x0;
821 error = -EEXIST; 834 error = -EEXIST;
@@ -829,6 +842,7 @@ found:
829 /* Initialize temporary selector matching only 842 /* Initialize temporary selector matching only
830 * to current session. */ 843 * to current session. */
831 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family); 844 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
845 memcpy(&x->mark, &pol->mark, sizeof(x->mark));
832 846
833 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid); 847 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
834 if (error) { 848 if (error) {
@@ -871,7 +885,7 @@ out:
871} 885}
872 886
873struct xfrm_state * 887struct xfrm_state *
874xfrm_stateonly_find(struct net *net, 888xfrm_stateonly_find(struct net *net, u32 mark,
875 xfrm_address_t *daddr, xfrm_address_t *saddr, 889 xfrm_address_t *daddr, xfrm_address_t *saddr,
876 unsigned short family, u8 mode, u8 proto, u32 reqid) 890 unsigned short family, u8 mode, u8 proto, u32 reqid)
877{ 891{
@@ -884,6 +898,7 @@ xfrm_stateonly_find(struct net *net,
884 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 898 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
885 if (x->props.family == family && 899 if (x->props.family == family &&
886 x->props.reqid == reqid && 900 x->props.reqid == reqid &&
901 (mark & x->mark.m) == x->mark.v &&
887 !(x->props.flags & XFRM_STATE_WILDRECV) && 902 !(x->props.flags & XFRM_STATE_WILDRECV) &&
888 xfrm_state_addr_check(x, daddr, saddr, family) && 903 xfrm_state_addr_check(x, daddr, saddr, family) &&
889 mode == x->props.mode && 904 mode == x->props.mode &&
@@ -946,11 +961,13 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
946 struct xfrm_state *x; 961 struct xfrm_state *x;
947 struct hlist_node *entry; 962 struct hlist_node *entry;
948 unsigned int h; 963 unsigned int h;
964 u32 mark = xnew->mark.v & xnew->mark.m;
949 965
950 h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family); 966 h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
951 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 967 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
952 if (x->props.family == family && 968 if (x->props.family == family &&
953 x->props.reqid == reqid && 969 x->props.reqid == reqid &&
970 (mark & x->mark.m) == x->mark.v &&
954 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) && 971 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
955 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family)) 972 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
956 x->genid = xfrm_state_genid; 973 x->genid = xfrm_state_genid;
@@ -967,11 +984,12 @@ void xfrm_state_insert(struct xfrm_state *x)
967EXPORT_SYMBOL(xfrm_state_insert); 984EXPORT_SYMBOL(xfrm_state_insert);
968 985
969/* xfrm_state_lock is held */ 986/* xfrm_state_lock is held */
970static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create) 987static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m, unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
971{ 988{
972 unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family); 989 unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
973 struct hlist_node *entry; 990 struct hlist_node *entry;
974 struct xfrm_state *x; 991 struct xfrm_state *x;
992 u32 mark = m->v & m->m;
975 993
976 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 994 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
977 if (x->props.reqid != reqid || 995 if (x->props.reqid != reqid ||
@@ -980,6 +998,7 @@ static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family
980 x->km.state != XFRM_STATE_ACQ || 998 x->km.state != XFRM_STATE_ACQ ||
981 x->id.spi != 0 || 999 x->id.spi != 0 ||
982 x->id.proto != proto || 1000 x->id.proto != proto ||
1001 (mark & x->mark.m) != x->mark.v ||
983 xfrm_addr_cmp(&x->id.daddr, daddr, family) || 1002 xfrm_addr_cmp(&x->id.daddr, daddr, family) ||
984 xfrm_addr_cmp(&x->props.saddr, saddr, family)) 1003 xfrm_addr_cmp(&x->props.saddr, saddr, family))
985 continue; 1004 continue;
@@ -1022,6 +1041,8 @@ static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family
1022 x->props.family = family; 1041 x->props.family = family;
1023 x->props.mode = mode; 1042 x->props.mode = mode;
1024 x->props.reqid = reqid; 1043 x->props.reqid = reqid;
1044 x->mark.v = m->v;
1045 x->mark.m = m->m;
1025 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; 1046 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1026 xfrm_state_hold(x); 1047 xfrm_state_hold(x);
1027 tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL); 1048 tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
@@ -1038,7 +1059,7 @@ static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family
1038 return x; 1059 return x;
1039} 1060}
1040 1061
1041static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 seq); 1062static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
1042 1063
1043int xfrm_state_add(struct xfrm_state *x) 1064int xfrm_state_add(struct xfrm_state *x)
1044{ 1065{
@@ -1046,6 +1067,7 @@ int xfrm_state_add(struct xfrm_state *x)
1046 struct xfrm_state *x1, *to_put; 1067 struct xfrm_state *x1, *to_put;
1047 int family; 1068 int family;
1048 int err; 1069 int err;
1070 u32 mark = x->mark.v & x->mark.m;
1049 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY); 1071 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1050 1072
1051 family = x->props.family; 1073 family = x->props.family;
@@ -1063,7 +1085,7 @@ int xfrm_state_add(struct xfrm_state *x)
1063 } 1085 }
1064 1086
1065 if (use_spi && x->km.seq) { 1087 if (use_spi && x->km.seq) {
1066 x1 = __xfrm_find_acq_byseq(net, x->km.seq); 1088 x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq);
1067 if (x1 && ((x1->id.proto != x->id.proto) || 1089 if (x1 && ((x1->id.proto != x->id.proto) ||
1068 xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) { 1090 xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
1069 to_put = x1; 1091 to_put = x1;
@@ -1072,8 +1094,8 @@ int xfrm_state_add(struct xfrm_state *x)
1072 } 1094 }
1073 1095
1074 if (use_spi && !x1) 1096 if (use_spi && !x1)
1075 x1 = __find_acq_core(net, family, x->props.mode, x->props.reqid, 1097 x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
1076 x->id.proto, 1098 x->props.reqid, x->id.proto,
1077 &x->id.daddr, &x->props.saddr, 0); 1099 &x->id.daddr, &x->props.saddr, 0);
1078 1100
1079 __xfrm_state_bump_genids(x); 1101 __xfrm_state_bump_genids(x);
@@ -1102,7 +1124,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
1102 int err = -ENOMEM; 1124 int err = -ENOMEM;
1103 struct xfrm_state *x = xfrm_state_alloc(net); 1125 struct xfrm_state *x = xfrm_state_alloc(net);
1104 if (!x) 1126 if (!x)
1105 goto error; 1127 goto out;
1106 1128
1107 memcpy(&x->id, &orig->id, sizeof(x->id)); 1129 memcpy(&x->id, &orig->id, sizeof(x->id));
1108 memcpy(&x->sel, &orig->sel, sizeof(x->sel)); 1130 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
@@ -1147,6 +1169,8 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
1147 goto error; 1169 goto error;
1148 } 1170 }
1149 1171
1172 memcpy(&x->mark, &orig->mark, sizeof(x->mark));
1173
1150 err = xfrm_init_state(x); 1174 err = xfrm_init_state(x);
1151 if (err) 1175 if (err)
1152 goto error; 1176 goto error;
@@ -1160,16 +1184,10 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
1160 return x; 1184 return x;
1161 1185
1162 error: 1186 error:
1187 xfrm_state_put(x);
1188out:
1163 if (errp) 1189 if (errp)
1164 *errp = err; 1190 *errp = err;
1165 if (x) {
1166 kfree(x->aalg);
1167 kfree(x->ealg);
1168 kfree(x->calg);
1169 kfree(x->encap);
1170 kfree(x->coaddr);
1171 }
1172 kfree(x);
1173 return NULL; 1191 return NULL;
1174} 1192}
1175 1193
@@ -1344,41 +1362,41 @@ int xfrm_state_check_expire(struct xfrm_state *x)
1344EXPORT_SYMBOL(xfrm_state_check_expire); 1362EXPORT_SYMBOL(xfrm_state_check_expire);
1345 1363
1346struct xfrm_state * 1364struct xfrm_state *
1347xfrm_state_lookup(struct net *net, xfrm_address_t *daddr, __be32 spi, u8 proto, 1365xfrm_state_lookup(struct net *net, u32 mark, xfrm_address_t *daddr, __be32 spi,
1348 unsigned short family) 1366 u8 proto, unsigned short family)
1349{ 1367{
1350 struct xfrm_state *x; 1368 struct xfrm_state *x;
1351 1369
1352 spin_lock_bh(&xfrm_state_lock); 1370 spin_lock_bh(&xfrm_state_lock);
1353 x = __xfrm_state_lookup(net, daddr, spi, proto, family); 1371 x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
1354 spin_unlock_bh(&xfrm_state_lock); 1372 spin_unlock_bh(&xfrm_state_lock);
1355 return x; 1373 return x;
1356} 1374}
1357EXPORT_SYMBOL(xfrm_state_lookup); 1375EXPORT_SYMBOL(xfrm_state_lookup);
1358 1376
1359struct xfrm_state * 1377struct xfrm_state *
1360xfrm_state_lookup_byaddr(struct net *net, 1378xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1361 xfrm_address_t *daddr, xfrm_address_t *saddr, 1379 xfrm_address_t *daddr, xfrm_address_t *saddr,
1362 u8 proto, unsigned short family) 1380 u8 proto, unsigned short family)
1363{ 1381{
1364 struct xfrm_state *x; 1382 struct xfrm_state *x;
1365 1383
1366 spin_lock_bh(&xfrm_state_lock); 1384 spin_lock_bh(&xfrm_state_lock);
1367 x = __xfrm_state_lookup_byaddr(net, daddr, saddr, proto, family); 1385 x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
1368 spin_unlock_bh(&xfrm_state_lock); 1386 spin_unlock_bh(&xfrm_state_lock);
1369 return x; 1387 return x;
1370} 1388}
1371EXPORT_SYMBOL(xfrm_state_lookup_byaddr); 1389EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1372 1390
1373struct xfrm_state * 1391struct xfrm_state *
1374xfrm_find_acq(struct net *net, u8 mode, u32 reqid, u8 proto, 1392xfrm_find_acq(struct net *net, struct xfrm_mark *mark, u8 mode, u32 reqid, u8 proto,
1375 xfrm_address_t *daddr, xfrm_address_t *saddr, 1393 xfrm_address_t *daddr, xfrm_address_t *saddr,
1376 int create, unsigned short family) 1394 int create, unsigned short family)
1377{ 1395{
1378 struct xfrm_state *x; 1396 struct xfrm_state *x;
1379 1397
1380 spin_lock_bh(&xfrm_state_lock); 1398 spin_lock_bh(&xfrm_state_lock);
1381 x = __find_acq_core(net, family, mode, reqid, proto, daddr, saddr, create); 1399 x = __find_acq_core(net, mark, family, mode, reqid, proto, daddr, saddr, create);
1382 spin_unlock_bh(&xfrm_state_lock); 1400 spin_unlock_bh(&xfrm_state_lock);
1383 1401
1384 return x; 1402 return x;
@@ -1425,7 +1443,7 @@ EXPORT_SYMBOL(xfrm_state_sort);
1425 1443
1426/* Silly enough, but I'm lazy to build resolution list */ 1444/* Silly enough, but I'm lazy to build resolution list */
1427 1445
1428static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 seq) 1446static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1429{ 1447{
1430 int i; 1448 int i;
1431 1449
@@ -1435,6 +1453,7 @@ static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 seq)
1435 1453
1436 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) { 1454 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) {
1437 if (x->km.seq == seq && 1455 if (x->km.seq == seq &&
1456 (mark & x->mark.m) == x->mark.v &&
1438 x->km.state == XFRM_STATE_ACQ) { 1457 x->km.state == XFRM_STATE_ACQ) {
1439 xfrm_state_hold(x); 1458 xfrm_state_hold(x);
1440 return x; 1459 return x;
@@ -1444,12 +1463,12 @@ static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 seq)
1444 return NULL; 1463 return NULL;
1445} 1464}
1446 1465
1447struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 seq) 1466struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1448{ 1467{
1449 struct xfrm_state *x; 1468 struct xfrm_state *x;
1450 1469
1451 spin_lock_bh(&xfrm_state_lock); 1470 spin_lock_bh(&xfrm_state_lock);
1452 x = __xfrm_find_acq_byseq(net, seq); 1471 x = __xfrm_find_acq_byseq(net, mark, seq);
1453 spin_unlock_bh(&xfrm_state_lock); 1472 spin_unlock_bh(&xfrm_state_lock);
1454 return x; 1473 return x;
1455} 1474}
@@ -1458,12 +1477,12 @@ EXPORT_SYMBOL(xfrm_find_acq_byseq);
1458u32 xfrm_get_acqseq(void) 1477u32 xfrm_get_acqseq(void)
1459{ 1478{
1460 u32 res; 1479 u32 res;
1461 static u32 acqseq; 1480 static atomic_t acqseq;
1462 static DEFINE_SPINLOCK(acqseq_lock); 1481
1482 do {
1483 res = atomic_inc_return(&acqseq);
1484 } while (!res);
1463 1485
1464 spin_lock_bh(&acqseq_lock);
1465 res = (++acqseq ? : ++acqseq);
1466 spin_unlock_bh(&acqseq_lock);
1467 return res; 1486 return res;
1468} 1487}
1469EXPORT_SYMBOL(xfrm_get_acqseq); 1488EXPORT_SYMBOL(xfrm_get_acqseq);
@@ -1476,6 +1495,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1476 int err = -ENOENT; 1495 int err = -ENOENT;
1477 __be32 minspi = htonl(low); 1496 __be32 minspi = htonl(low);
1478 __be32 maxspi = htonl(high); 1497 __be32 maxspi = htonl(high);
1498 u32 mark = x->mark.v & x->mark.m;
1479 1499
1480 spin_lock_bh(&x->lock); 1500 spin_lock_bh(&x->lock);
1481 if (x->km.state == XFRM_STATE_DEAD) 1501 if (x->km.state == XFRM_STATE_DEAD)
@@ -1488,7 +1508,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1488 err = -ENOENT; 1508 err = -ENOENT;
1489 1509
1490 if (minspi == maxspi) { 1510 if (minspi == maxspi) {
1491 x0 = xfrm_state_lookup(net, &x->id.daddr, minspi, x->id.proto, x->props.family); 1511 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
1492 if (x0) { 1512 if (x0) {
1493 xfrm_state_put(x0); 1513 xfrm_state_put(x0);
1494 goto unlock; 1514 goto unlock;
@@ -1498,7 +1518,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1498 u32 spi = 0; 1518 u32 spi = 0;
1499 for (h=0; h<high-low+1; h++) { 1519 for (h=0; h<high-low+1; h++) {
1500 spi = low + net_random()%(high-low+1); 1520 spi = low + net_random()%(high-low+1);
1501 x0 = xfrm_state_lookup(net, &x->id.daddr, htonl(spi), x->id.proto, x->props.family); 1521 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
1502 if (x0 == NULL) { 1522 if (x0 == NULL) {
1503 x->id.spi = htonl(spi); 1523 x->id.spi = htonl(spi);
1504 break; 1524 break;
diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
index 2e221f2cad7e..2c4d6cdcba49 100644
--- a/net/xfrm/xfrm_sysctl.c
+++ b/net/xfrm/xfrm_sysctl.c
@@ -2,7 +2,7 @@
2#include <net/net_namespace.h> 2#include <net/net_namespace.h>
3#include <net/xfrm.h> 3#include <net/xfrm.h>
4 4
5static void __xfrm_sysctl_init(struct net *net) 5static void __net_init __xfrm_sysctl_init(struct net *net)
6{ 6{
7 net->xfrm.sysctl_aevent_etime = XFRM_AE_ETIME; 7 net->xfrm.sysctl_aevent_etime = XFRM_AE_ETIME;
8 net->xfrm.sysctl_aevent_rseqth = XFRM_AE_SEQT_SIZE; 8 net->xfrm.sysctl_aevent_rseqth = XFRM_AE_SEQT_SIZE;
@@ -64,7 +64,7 @@ out_kmemdup:
64 return -ENOMEM; 64 return -ENOMEM;
65} 65}
66 66
67void xfrm_sysctl_fini(struct net *net) 67void __net_exit xfrm_sysctl_fini(struct net *net)
68{ 68{
69 struct ctl_table *table; 69 struct ctl_table *table;
70 70
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 1ada6186933c..6106b72826d3 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -446,6 +446,8 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
446 goto error; 446 goto error;
447 } 447 }
448 448
449 xfrm_mark_get(attrs, &x->mark);
450
449 err = xfrm_init_state(x); 451 err = xfrm_init_state(x);
450 if (err) 452 if (err)
451 goto error; 453 goto error;
@@ -526,11 +528,13 @@ static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
526 int *errp) 528 int *errp)
527{ 529{
528 struct xfrm_state *x = NULL; 530 struct xfrm_state *x = NULL;
531 struct xfrm_mark m;
529 int err; 532 int err;
533 u32 mark = xfrm_mark_get(attrs, &m);
530 534
531 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) { 535 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
532 err = -ESRCH; 536 err = -ESRCH;
533 x = xfrm_state_lookup(net, &p->daddr, p->spi, p->proto, p->family); 537 x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
534 } else { 538 } else {
535 xfrm_address_t *saddr = NULL; 539 xfrm_address_t *saddr = NULL;
536 540
@@ -541,7 +545,8 @@ static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
541 } 545 }
542 546
543 err = -ESRCH; 547 err = -ESRCH;
544 x = xfrm_state_lookup_byaddr(net, &p->daddr, saddr, 548 x = xfrm_state_lookup_byaddr(net, mark,
549 &p->daddr, saddr,
545 p->proto, p->family); 550 p->proto, p->family);
546 } 551 }
547 552
@@ -683,6 +688,9 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
683 if (x->encap) 688 if (x->encap)
684 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap); 689 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
685 690
691 if (xfrm_mark_put(skb, &x->mark))
692 goto nla_put_failure;
693
686 if (x->security && copy_sec_ctx(x->security, skb) < 0) 694 if (x->security && copy_sec_ctx(x->security, skb) < 0)
687 goto nla_put_failure; 695 goto nla_put_failure;
688 696
@@ -781,7 +789,8 @@ static inline size_t xfrm_spdinfo_msgsize(void)
781 + nla_total_size(sizeof(struct xfrmu_spdhinfo)); 789 + nla_total_size(sizeof(struct xfrmu_spdhinfo));
782} 790}
783 791
784static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) 792static int build_spdinfo(struct sk_buff *skb, struct net *net,
793 u32 pid, u32 seq, u32 flags)
785{ 794{
786 struct xfrmk_spdinfo si; 795 struct xfrmk_spdinfo si;
787 struct xfrmu_spdinfo spc; 796 struct xfrmu_spdinfo spc;
@@ -795,7 +804,7 @@ static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
795 804
796 f = nlmsg_data(nlh); 805 f = nlmsg_data(nlh);
797 *f = flags; 806 *f = flags;
798 xfrm_spd_getinfo(&si); 807 xfrm_spd_getinfo(net, &si);
799 spc.incnt = si.incnt; 808 spc.incnt = si.incnt;
800 spc.outcnt = si.outcnt; 809 spc.outcnt = si.outcnt;
801 spc.fwdcnt = si.fwdcnt; 810 spc.fwdcnt = si.fwdcnt;
@@ -828,7 +837,7 @@ static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
828 if (r_skb == NULL) 837 if (r_skb == NULL)
829 return -ENOMEM; 838 return -ENOMEM;
830 839
831 if (build_spdinfo(r_skb, spid, seq, *flags) < 0) 840 if (build_spdinfo(r_skb, net, spid, seq, *flags) < 0)
832 BUG(); 841 BUG();
833 842
834 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid); 843 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
@@ -841,7 +850,8 @@ static inline size_t xfrm_sadinfo_msgsize(void)
841 + nla_total_size(4); /* XFRMA_SAD_CNT */ 850 + nla_total_size(4); /* XFRMA_SAD_CNT */
842} 851}
843 852
844static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) 853static int build_sadinfo(struct sk_buff *skb, struct net *net,
854 u32 pid, u32 seq, u32 flags)
845{ 855{
846 struct xfrmk_sadinfo si; 856 struct xfrmk_sadinfo si;
847 struct xfrmu_sadhinfo sh; 857 struct xfrmu_sadhinfo sh;
@@ -854,7 +864,7 @@ static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
854 864
855 f = nlmsg_data(nlh); 865 f = nlmsg_data(nlh);
856 *f = flags; 866 *f = flags;
857 xfrm_sad_getinfo(&si); 867 xfrm_sad_getinfo(net, &si);
858 868
859 sh.sadhmcnt = si.sadhmcnt; 869 sh.sadhmcnt = si.sadhmcnt;
860 sh.sadhcnt = si.sadhcnt; 870 sh.sadhcnt = si.sadhcnt;
@@ -882,7 +892,7 @@ static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
882 if (r_skb == NULL) 892 if (r_skb == NULL)
883 return -ENOMEM; 893 return -ENOMEM;
884 894
885 if (build_sadinfo(r_skb, spid, seq, *flags) < 0) 895 if (build_sadinfo(r_skb, net, spid, seq, *flags) < 0)
886 BUG(); 896 BUG();
887 897
888 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid); 898 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
@@ -945,6 +955,8 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
945 xfrm_address_t *daddr; 955 xfrm_address_t *daddr;
946 int family; 956 int family;
947 int err; 957 int err;
958 u32 mark;
959 struct xfrm_mark m;
948 960
949 p = nlmsg_data(nlh); 961 p = nlmsg_data(nlh);
950 err = verify_userspi_info(p); 962 err = verify_userspi_info(p);
@@ -955,8 +967,10 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
955 daddr = &p->info.id.daddr; 967 daddr = &p->info.id.daddr;
956 968
957 x = NULL; 969 x = NULL;
970
971 mark = xfrm_mark_get(attrs, &m);
958 if (p->info.seq) { 972 if (p->info.seq) {
959 x = xfrm_find_acq_byseq(net, p->info.seq); 973 x = xfrm_find_acq_byseq(net, mark, p->info.seq);
960 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) { 974 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {
961 xfrm_state_put(x); 975 xfrm_state_put(x);
962 x = NULL; 976 x = NULL;
@@ -964,7 +978,7 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
964 } 978 }
965 979
966 if (!x) 980 if (!x)
967 x = xfrm_find_acq(net, p->info.mode, p->info.reqid, 981 x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
968 p->info.id.proto, daddr, 982 p->info.id.proto, daddr,
969 &p->info.saddr, 1, 983 &p->info.saddr, 1,
970 family); 984 family);
@@ -1218,6 +1232,8 @@ static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_us
1218 if (err) 1232 if (err)
1219 goto error; 1233 goto error;
1220 1234
1235 xfrm_mark_get(attrs, &xp->mark);
1236
1221 return xp; 1237 return xp;
1222 error: 1238 error:
1223 *errp = err; 1239 *errp = err;
@@ -1364,10 +1380,13 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
1364 goto nlmsg_failure; 1380 goto nlmsg_failure;
1365 if (copy_to_user_policy_type(xp->type, skb) < 0) 1381 if (copy_to_user_policy_type(xp->type, skb) < 0)
1366 goto nlmsg_failure; 1382 goto nlmsg_failure;
1383 if (xfrm_mark_put(skb, &xp->mark))
1384 goto nla_put_failure;
1367 1385
1368 nlmsg_end(skb, nlh); 1386 nlmsg_end(skb, nlh);
1369 return 0; 1387 return 0;
1370 1388
1389nla_put_failure:
1371nlmsg_failure: 1390nlmsg_failure:
1372 nlmsg_cancel(skb, nlh); 1391 nlmsg_cancel(skb, nlh);
1373 return -EMSGSIZE; 1392 return -EMSGSIZE;
@@ -1439,6 +1458,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1439 int err; 1458 int err;
1440 struct km_event c; 1459 struct km_event c;
1441 int delete; 1460 int delete;
1461 struct xfrm_mark m;
1462 u32 mark = xfrm_mark_get(attrs, &m);
1442 1463
1443 p = nlmsg_data(nlh); 1464 p = nlmsg_data(nlh);
1444 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY; 1465 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
@@ -1452,7 +1473,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1452 return err; 1473 return err;
1453 1474
1454 if (p->index) 1475 if (p->index)
1455 xp = xfrm_policy_byid(net, type, p->dir, p->index, delete, &err); 1476 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, delete, &err);
1456 else { 1477 else {
1457 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; 1478 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1458 struct xfrm_sec_ctx *ctx; 1479 struct xfrm_sec_ctx *ctx;
@@ -1469,8 +1490,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1469 if (err) 1490 if (err)
1470 return err; 1491 return err;
1471 } 1492 }
1472 xp = xfrm_policy_bysel_ctx(net, type, p->dir, &p->sel, ctx, 1493 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, &p->sel,
1473 delete, &err); 1494 ctx, delete, &err);
1474 security_xfrm_policy_free(ctx); 1495 security_xfrm_policy_free(ctx);
1475 } 1496 }
1476 if (xp == NULL) 1497 if (xp == NULL)
@@ -1522,8 +1543,11 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1522 audit_info.sessionid = NETLINK_CB(skb).sessionid; 1543 audit_info.sessionid = NETLINK_CB(skb).sessionid;
1523 audit_info.secid = NETLINK_CB(skb).sid; 1544 audit_info.secid = NETLINK_CB(skb).sid;
1524 err = xfrm_state_flush(net, p->proto, &audit_info); 1545 err = xfrm_state_flush(net, p->proto, &audit_info);
1525 if (err) 1546 if (err) {
1547 if (err == -ESRCH) /* empty table */
1548 return 0;
1526 return err; 1549 return err;
1550 }
1527 c.data.proto = p->proto; 1551 c.data.proto = p->proto;
1528 c.event = nlh->nlmsg_type; 1552 c.event = nlh->nlmsg_type;
1529 c.seq = nlh->nlmsg_seq; 1553 c.seq = nlh->nlmsg_seq;
@@ -1539,6 +1563,7 @@ static inline size_t xfrm_aevent_msgsize(void)
1539 return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id)) 1563 return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
1540 + nla_total_size(sizeof(struct xfrm_replay_state)) 1564 + nla_total_size(sizeof(struct xfrm_replay_state))
1541 + nla_total_size(sizeof(struct xfrm_lifetime_cur)) 1565 + nla_total_size(sizeof(struct xfrm_lifetime_cur))
1566 + nla_total_size(sizeof(struct xfrm_mark))
1542 + nla_total_size(4) /* XFRM_AE_RTHR */ 1567 + nla_total_size(4) /* XFRM_AE_RTHR */
1543 + nla_total_size(4); /* XFRM_AE_ETHR */ 1568 + nla_total_size(4); /* XFRM_AE_ETHR */
1544} 1569}
@@ -1571,6 +1596,9 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_eve
1571 NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH, 1596 NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH,
1572 x->replay_maxage * 10 / HZ); 1597 x->replay_maxage * 10 / HZ);
1573 1598
1599 if (xfrm_mark_put(skb, &x->mark))
1600 goto nla_put_failure;
1601
1574 return nlmsg_end(skb, nlh); 1602 return nlmsg_end(skb, nlh);
1575 1603
1576nla_put_failure: 1604nla_put_failure:
@@ -1586,6 +1614,8 @@ static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1586 struct sk_buff *r_skb; 1614 struct sk_buff *r_skb;
1587 int err; 1615 int err;
1588 struct km_event c; 1616 struct km_event c;
1617 u32 mark;
1618 struct xfrm_mark m;
1589 struct xfrm_aevent_id *p = nlmsg_data(nlh); 1619 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1590 struct xfrm_usersa_id *id = &p->sa_id; 1620 struct xfrm_usersa_id *id = &p->sa_id;
1591 1621
@@ -1593,7 +1623,9 @@ static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1593 if (r_skb == NULL) 1623 if (r_skb == NULL)
1594 return -ENOMEM; 1624 return -ENOMEM;
1595 1625
1596 x = xfrm_state_lookup(net, &id->daddr, id->spi, id->proto, id->family); 1626 mark = xfrm_mark_get(attrs, &m);
1627
1628 x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
1597 if (x == NULL) { 1629 if (x == NULL) {
1598 kfree_skb(r_skb); 1630 kfree_skb(r_skb);
1599 return -ESRCH; 1631 return -ESRCH;
@@ -1624,6 +1656,8 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1624 struct xfrm_state *x; 1656 struct xfrm_state *x;
1625 struct km_event c; 1657 struct km_event c;
1626 int err = - EINVAL; 1658 int err = - EINVAL;
1659 u32 mark = 0;
1660 struct xfrm_mark m;
1627 struct xfrm_aevent_id *p = nlmsg_data(nlh); 1661 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1628 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; 1662 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
1629 struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; 1663 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
@@ -1635,7 +1669,9 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1635 if (!(nlh->nlmsg_flags&NLM_F_REPLACE)) 1669 if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
1636 return err; 1670 return err;
1637 1671
1638 x = xfrm_state_lookup(net, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family); 1672 mark = xfrm_mark_get(attrs, &m);
1673
1674 x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
1639 if (x == NULL) 1675 if (x == NULL)
1640 return -ESRCH; 1676 return -ESRCH;
1641 1677
@@ -1674,8 +1710,12 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1674 audit_info.sessionid = NETLINK_CB(skb).sessionid; 1710 audit_info.sessionid = NETLINK_CB(skb).sessionid;
1675 audit_info.secid = NETLINK_CB(skb).sid; 1711 audit_info.secid = NETLINK_CB(skb).sid;
1676 err = xfrm_policy_flush(net, type, &audit_info); 1712 err = xfrm_policy_flush(net, type, &audit_info);
1677 if (err) 1713 if (err) {
1714 if (err == -ESRCH) /* empty table */
1715 return 0;
1678 return err; 1716 return err;
1717 }
1718
1679 c.data.type = type; 1719 c.data.type = type;
1680 c.event = nlh->nlmsg_type; 1720 c.event = nlh->nlmsg_type;
1681 c.seq = nlh->nlmsg_seq; 1721 c.seq = nlh->nlmsg_seq;
@@ -1694,13 +1734,15 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1694 struct xfrm_userpolicy_info *p = &up->pol; 1734 struct xfrm_userpolicy_info *p = &up->pol;
1695 u8 type = XFRM_POLICY_TYPE_MAIN; 1735 u8 type = XFRM_POLICY_TYPE_MAIN;
1696 int err = -ENOENT; 1736 int err = -ENOENT;
1737 struct xfrm_mark m;
1738 u32 mark = xfrm_mark_get(attrs, &m);
1697 1739
1698 err = copy_from_user_policy_type(&type, attrs); 1740 err = copy_from_user_policy_type(&type, attrs);
1699 if (err) 1741 if (err)
1700 return err; 1742 return err;
1701 1743
1702 if (p->index) 1744 if (p->index)
1703 xp = xfrm_policy_byid(net, type, p->dir, p->index, 0, &err); 1745 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
1704 else { 1746 else {
1705 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; 1747 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1706 struct xfrm_sec_ctx *ctx; 1748 struct xfrm_sec_ctx *ctx;
@@ -1717,7 +1759,8 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1717 if (err) 1759 if (err)
1718 return err; 1760 return err;
1719 } 1761 }
1720 xp = xfrm_policy_bysel_ctx(net, type, p->dir, &p->sel, ctx, 0, &err); 1762 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir,
1763 &p->sel, ctx, 0, &err);
1721 security_xfrm_policy_free(ctx); 1764 security_xfrm_policy_free(ctx);
1722 } 1765 }
1723 if (xp == NULL) 1766 if (xp == NULL)
@@ -1757,8 +1800,10 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1757 int err; 1800 int err;
1758 struct xfrm_user_expire *ue = nlmsg_data(nlh); 1801 struct xfrm_user_expire *ue = nlmsg_data(nlh);
1759 struct xfrm_usersa_info *p = &ue->state; 1802 struct xfrm_usersa_info *p = &ue->state;
1803 struct xfrm_mark m;
1804 u32 mark = xfrm_mark_get(attrs, &m);;
1760 1805
1761 x = xfrm_state_lookup(net, &p->id.daddr, p->id.spi, p->id.proto, p->family); 1806 x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
1762 1807
1763 err = -ENOENT; 1808 err = -ENOENT;
1764 if (x == NULL) 1809 if (x == NULL)
@@ -1792,6 +1837,7 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
1792 struct xfrm_user_tmpl *ut; 1837 struct xfrm_user_tmpl *ut;
1793 int i; 1838 int i;
1794 struct nlattr *rt = attrs[XFRMA_TMPL]; 1839 struct nlattr *rt = attrs[XFRMA_TMPL];
1840 struct xfrm_mark mark;
1795 1841
1796 struct xfrm_user_acquire *ua = nlmsg_data(nlh); 1842 struct xfrm_user_acquire *ua = nlmsg_data(nlh);
1797 struct xfrm_state *x = xfrm_state_alloc(net); 1843 struct xfrm_state *x = xfrm_state_alloc(net);
@@ -1800,6 +1846,8 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
1800 if (!x) 1846 if (!x)
1801 goto nomem; 1847 goto nomem;
1802 1848
1849 xfrm_mark_get(attrs, &mark);
1850
1803 err = verify_newpolicy_info(&ua->policy); 1851 err = verify_newpolicy_info(&ua->policy);
1804 if (err) 1852 if (err)
1805 goto bad_policy; 1853 goto bad_policy;
@@ -1812,7 +1860,8 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
1812 memcpy(&x->id, &ua->id, sizeof(ua->id)); 1860 memcpy(&x->id, &ua->id, sizeof(ua->id));
1813 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr)); 1861 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
1814 memcpy(&x->sel, &ua->sel, sizeof(ua->sel)); 1862 memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
1815 1863 xp->mark.m = x->mark.m = mark.m;
1864 xp->mark.v = x->mark.v = mark.v;
1816 ut = nla_data(rt); 1865 ut = nla_data(rt);
1817 /* extract the templates and for each call km_key */ 1866 /* extract the templates and for each call km_key */
1818 for (i = 0; i < xp->xfrm_nr; i++, ut++) { 1867 for (i = 0; i < xp->xfrm_nr; i++, ut++) {
@@ -2052,6 +2101,10 @@ static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
2052#undef XMSGSIZE 2101#undef XMSGSIZE
2053 2102
2054static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { 2103static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
2104 [XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)},
2105 [XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)},
2106 [XFRMA_LASTUSED] = { .type = NLA_U64},
2107 [XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
2055 [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) }, 2108 [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
2056 [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) }, 2109 [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
2057 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) }, 2110 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
@@ -2068,6 +2121,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
2068 [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)}, 2121 [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
2069 [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) }, 2122 [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
2070 [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) }, 2123 [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
2124 [XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
2071}; 2125};
2072 2126
2073static struct xfrm_link { 2127static struct xfrm_link {
@@ -2147,7 +2201,8 @@ static void xfrm_netlink_rcv(struct sk_buff *skb)
2147 2201
2148static inline size_t xfrm_expire_msgsize(void) 2202static inline size_t xfrm_expire_msgsize(void)
2149{ 2203{
2150 return NLMSG_ALIGN(sizeof(struct xfrm_user_expire)); 2204 return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
2205 + nla_total_size(sizeof(struct xfrm_mark));
2151} 2206}
2152 2207
2153static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c) 2208static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
@@ -2163,7 +2218,13 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_eve
2163 copy_to_user_state(x, &ue->state); 2218 copy_to_user_state(x, &ue->state);
2164 ue->hard = (c->data.hard != 0) ? 1 : 0; 2219 ue->hard = (c->data.hard != 0) ? 1 : 0;
2165 2220
2221 if (xfrm_mark_put(skb, &x->mark))
2222 goto nla_put_failure;
2223
2166 return nlmsg_end(skb, nlh); 2224 return nlmsg_end(skb, nlh);
2225
2226nla_put_failure:
2227 return -EMSGSIZE;
2167} 2228}
2168 2229
2169static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c) 2230static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
@@ -2175,8 +2236,10 @@ static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
2175 if (skb == NULL) 2236 if (skb == NULL)
2176 return -ENOMEM; 2237 return -ENOMEM;
2177 2238
2178 if (build_expire(skb, x, c) < 0) 2239 if (build_expire(skb, x, c) < 0) {
2179 BUG(); 2240 kfree_skb(skb);
2241 return -EMSGSIZE;
2242 }
2180 2243
2181 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC); 2244 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2182} 2245}
@@ -2264,6 +2327,7 @@ static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c)
2264 if (c->event == XFRM_MSG_DELSA) { 2327 if (c->event == XFRM_MSG_DELSA) {
2265 len += nla_total_size(headlen); 2328 len += nla_total_size(headlen);
2266 headlen = sizeof(*id); 2329 headlen = sizeof(*id);
2330 len += nla_total_size(sizeof(struct xfrm_mark));
2267 } 2331 }
2268 len += NLMSG_ALIGN(headlen); 2332 len += NLMSG_ALIGN(headlen);
2269 2333
@@ -2334,6 +2398,7 @@ static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
2334{ 2398{
2335 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire)) 2399 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
2336 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr) 2400 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2401 + nla_total_size(sizeof(struct xfrm_mark))
2337 + nla_total_size(xfrm_user_sec_ctx_size(x->security)) 2402 + nla_total_size(xfrm_user_sec_ctx_size(x->security))
2338 + userpolicy_type_attrsize(); 2403 + userpolicy_type_attrsize();
2339} 2404}
@@ -2366,9 +2431,12 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2366 goto nlmsg_failure; 2431 goto nlmsg_failure;
2367 if (copy_to_user_policy_type(xp->type, skb) < 0) 2432 if (copy_to_user_policy_type(xp->type, skb) < 0)
2368 goto nlmsg_failure; 2433 goto nlmsg_failure;
2434 if (xfrm_mark_put(skb, &xp->mark))
2435 goto nla_put_failure;
2369 2436
2370 return nlmsg_end(skb, nlh); 2437 return nlmsg_end(skb, nlh);
2371 2438
2439nla_put_failure:
2372nlmsg_failure: 2440nlmsg_failure:
2373 nlmsg_cancel(skb, nlh); 2441 nlmsg_cancel(skb, nlh);
2374 return -EMSGSIZE; 2442 return -EMSGSIZE;
@@ -2455,6 +2523,7 @@ static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
2455 return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire)) 2523 return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
2456 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr) 2524 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2457 + nla_total_size(xfrm_user_sec_ctx_size(xp->security)) 2525 + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
2526 + nla_total_size(sizeof(struct xfrm_mark))
2458 + userpolicy_type_attrsize(); 2527 + userpolicy_type_attrsize();
2459} 2528}
2460 2529
@@ -2477,10 +2546,13 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
2477 goto nlmsg_failure; 2546 goto nlmsg_failure;
2478 if (copy_to_user_policy_type(xp->type, skb) < 0) 2547 if (copy_to_user_policy_type(xp->type, skb) < 0)
2479 goto nlmsg_failure; 2548 goto nlmsg_failure;
2549 if (xfrm_mark_put(skb, &xp->mark))
2550 goto nla_put_failure;
2480 upe->hard = !!hard; 2551 upe->hard = !!hard;
2481 2552
2482 return nlmsg_end(skb, nlh); 2553 return nlmsg_end(skb, nlh);
2483 2554
2555nla_put_failure:
2484nlmsg_failure: 2556nlmsg_failure:
2485 nlmsg_cancel(skb, nlh); 2557 nlmsg_cancel(skb, nlh);
2486 return -EMSGSIZE; 2558 return -EMSGSIZE;
@@ -2517,6 +2589,7 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *
2517 headlen = sizeof(*id); 2589 headlen = sizeof(*id);
2518 } 2590 }
2519 len += userpolicy_type_attrsize(); 2591 len += userpolicy_type_attrsize();
2592 len += nla_total_size(sizeof(struct xfrm_mark));
2520 len += NLMSG_ALIGN(headlen); 2593 len += NLMSG_ALIGN(headlen);
2521 2594
2522 skb = nlmsg_new(len, GFP_ATOMIC); 2595 skb = nlmsg_new(len, GFP_ATOMIC);
@@ -2552,10 +2625,14 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *
2552 if (copy_to_user_policy_type(xp->type, skb) < 0) 2625 if (copy_to_user_policy_type(xp->type, skb) < 0)
2553 goto nlmsg_failure; 2626 goto nlmsg_failure;
2554 2627
2628 if (xfrm_mark_put(skb, &xp->mark))
2629 goto nla_put_failure;
2630
2555 nlmsg_end(skb, nlh); 2631 nlmsg_end(skb, nlh);
2556 2632
2557 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC); 2633 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2558 2634
2635nla_put_failure:
2559nlmsg_failure: 2636nlmsg_failure:
2560 kfree_skb(skb); 2637 kfree_skb(skb);
2561 return -1; 2638 return -1;