aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
Diffstat (limited to 'net')
-rw-r--r--net/8021q/vlan.c4
-rw-r--r--net/8021q/vlan.h2
-rw-r--r--net/8021q/vlan_core.c2
-rw-r--r--net/8021q/vlan_dev.c9
-rw-r--r--net/8021q/vlanproc.c2
-rw-r--r--net/appletalk/atalk_proc.c30
-rw-r--r--net/atm/addr.c2
-rw-r--r--net/atm/atm_misc.c40
-rw-r--r--net/atm/atm_sysfs.c27
-rw-r--r--net/atm/br2684.c90
-rw-r--r--net/atm/clip.c86
-rw-r--r--net/atm/common.c386
-rw-r--r--net/atm/ioctl.c196
-rw-r--r--net/atm/lec.c599
-rw-r--r--net/atm/mpc.c540
-rw-r--r--net/atm/mpoa_caches.c190
-rw-r--r--net/atm/mpoa_proc.c89
-rw-r--r--net/atm/pppoatm.c28
-rw-r--r--net/atm/proc.c83
-rw-r--r--net/atm/pvc.c43
-rw-r--r--net/atm/raw.c26
-rw-r--r--net/atm/resources.c418
-rw-r--r--net/atm/signaling.c219
-rw-r--r--net/atm/svc.c258
-rw-r--r--net/ax25/af_ax25.c18
-rw-r--r--net/ax25/ax25_uid.c25
-rw-r--r--net/bluetooth/bnep/netdev.c6
-rw-r--r--net/bluetooth/cmtp/capi.c37
-rw-r--r--net/bridge/br_if.c4
-rw-r--r--net/bridge/br_private.h1
-rw-r--r--net/bridge/netfilter/ebt_802_3.c2
-rw-r--r--net/bridge/netfilter/ebt_arp.c2
-rw-r--r--net/bridge/netfilter/ebt_arpreply.c2
-rw-r--r--net/bridge/netfilter/ebt_dnat.c2
-rw-r--r--net/bridge/netfilter/ebt_ip.c2
-rw-r--r--net/bridge/netfilter/ebt_ip6.c2
-rw-r--r--net/bridge/netfilter/ebt_limit.c18
-rw-r--r--net/bridge/netfilter/ebt_log.c2
-rw-r--r--net/bridge/netfilter/ebt_mark.c33
-rw-r--r--net/bridge/netfilter/ebt_mark_m.c39
-rw-r--r--net/bridge/netfilter/ebt_nflog.c2
-rw-r--r--net/bridge/netfilter/ebt_pkttype.c2
-rw-r--r--net/bridge/netfilter/ebt_redirect.c2
-rw-r--r--net/bridge/netfilter/ebt_snat.c2
-rw-r--r--net/bridge/netfilter/ebt_stp.c2
-rw-r--r--net/bridge/netfilter/ebt_ulog.c2
-rw-r--r--net/bridge/netfilter/ebt_vlan.c2
-rw-r--r--net/bridge/netfilter/ebtable_broute.c2
-rw-r--r--net/bridge/netfilter/ebtable_filter.c2
-rw-r--r--net/bridge/netfilter/ebtable_nat.c2
-rw-r--r--net/bridge/netfilter/ebtables.c1241
-rw-r--r--net/can/af_can.c124
-rw-r--r--net/can/af_can.h4
-rw-r--r--net/can/proc.c93
-rw-r--r--net/core/dev.c107
-rw-r--r--net/core/drop_monitor.c1
-rw-r--r--net/core/ethtool.c380
-rw-r--r--net/core/fib_rules.c2
-rw-r--r--net/core/filter.c2
-rw-r--r--net/core/neighbour.c18
-rw-r--r--net/core/netpoll.c169
-rw-r--r--net/core/pktgen.c3
-rw-r--r--net/core/rtnetlink.c71
-rw-r--r--net/core/sock.c16
-rw-r--r--net/dcb/dcbnl.c16
-rw-r--r--net/dccp/ccid.c9
-rw-r--r--net/dccp/ipv4.c4
-rw-r--r--net/dccp/ipv6.c4
-rw-r--r--net/dccp/proto.c7
-rw-r--r--net/ethernet/eth.c6
-rw-r--r--net/ipv4/af_inet.c46
-rw-r--r--net/ipv4/ah4.c2
-rw-r--r--net/ipv4/arp.c58
-rw-r--r--net/ipv4/devinet.c30
-rw-r--r--net/ipv4/esp4.c2
-rw-r--r--net/ipv4/fib_frontend.c4
-rw-r--r--net/ipv4/fib_semantics.c80
-rw-r--r--net/ipv4/icmp.c2
-rw-r--r--net/ipv4/igmp.c87
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/ip_fragment.c39
-rw-r--r--net/ipv4/ip_gre.c25
-rw-r--r--net/ipv4/ip_sockglue.c14
-rw-r--r--net/ipv4/ipcomp.c11
-rw-r--r--net/ipv4/ipip.c20
-rw-r--r--net/ipv4/ipmr.c3
-rw-r--r--net/ipv4/netfilter/arp_tables.c381
-rw-r--r--net/ipv4/netfilter/arptable_filter.c95
-rw-r--r--net/ipv4/netfilter/ip_tables.c552
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c14
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c4
-rw-r--r--net/ipv4/netfilter/iptable_filter.c124
-rw-r--r--net/ipv4/netfilter/iptable_mangle.c166
-rw-r--r--net/ipv4/netfilter/iptable_raw.c96
-rw-r--r--net/ipv4/netfilter/iptable_security.c117
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c3
-rw-r--r--net/ipv4/netfilter/nf_conntrack_proto_icmp.c11
-rw-r--r--net/ipv4/netfilter/nf_defrag_ipv4.c19
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c24
-rw-r--r--net/ipv4/netfilter/nf_nat_ftp.c105
-rw-r--r--net/ipv4/netfilter/nf_nat_helper.c39
-rw-r--r--net/ipv4/netfilter/nf_nat_pptp.c3
-rw-r--r--net/ipv4/netfilter/nf_nat_rule.c41
-rw-r--r--net/ipv4/netfilter/nf_nat_sip.c154
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c31
-rw-r--r--net/ipv4/proc.c32
-rw-r--r--net/ipv4/route.c9
-rw-r--r--net/ipv4/syncookies.c3
-rw-r--r--net/ipv4/sysctl_net_ipv4.c14
-rw-r--r--net/ipv4/tcp.c65
-rw-r--r--net/ipv4/tcp_input.c12
-rw-r--r--net/ipv4/tcp_ipv4.c25
-rw-r--r--net/ipv4/tcp_output.c22
-rw-r--r--net/ipv4/tcp_timer.c27
-rw-r--r--net/ipv4/udp.c19
-rw-r--r--net/ipv4/udplite.c4
-rw-r--r--net/ipv6/addrconf.c91
-rw-r--r--net/ipv6/af_inet6.c32
-rw-r--r--net/ipv6/ah6.c2
-rw-r--r--net/ipv6/anycast.c2
-rw-r--r--net/ipv6/esp6.c2
-rw-r--r--net/ipv6/exthdrs.c2
-rw-r--r--net/ipv6/fib6_rules.c4
-rw-r--r--net/ipv6/icmp.c12
-rw-r--r--net/ipv6/ip6_fib.c52
-rw-r--r--net/ipv6/ip6_flowlabel.c9
-rw-r--r--net/ipv6/ip6_input.c3
-rw-r--r--net/ipv6/ip6_output.c11
-rw-r--r--net/ipv6/ip6_tunnel.c43
-rw-r--r--net/ipv6/ipcomp6.c15
-rw-r--r--net/ipv6/mcast.c32
-rw-r--r--net/ipv6/mip6.c2
-rw-r--r--net/ipv6/ndisc.c7
-rw-r--r--net/ipv6/netfilter/ip6_tables.c554
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c2
-rw-r--r--net/ipv6/netfilter/ip6table_filter.c113
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c141
-rw-r--r--net/ipv6/netfilter/ip6table_raw.c86
-rw-r--r--net/ipv6/netfilter/ip6table_security.c109
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c14
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c11
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c15
-rw-r--r--net/ipv6/proc.c39
-rw-r--r--net/ipv6/raw.c4
-rw-r--r--net/ipv6/reassembly.c27
-rw-r--r--net/ipv6/route.c10
-rw-r--r--net/ipv6/sit.c25
-rw-r--r--net/ipv6/syncookies.c3
-rw-r--r--net/ipv6/sysctl_net_ipv6.c4
-rw-r--r--net/ipv6/tcp_ipv6.c22
-rw-r--r--net/ipv6/tunnel6.c4
-rw-r--r--net/ipv6/udp.c22
-rw-r--r--net/ipv6/udplite.c4
-rw-r--r--net/ipv6/xfrm6_input.c2
-rw-r--r--net/ipv6/xfrm6_output.c2
-rw-r--r--net/ipv6/xfrm6_tunnel.c194
-rw-r--r--net/ipx/ipx_proc.c90
-rw-r--r--net/irda/ircomm/ircomm_tty.c6
-rw-r--r--net/irda/irlan/irlan_common.c28
-rw-r--r--net/irda/irlan/irlan_eth.c5
-rw-r--r--net/irda/irnetlink.c2
-rw-r--r--net/key/af_key.c160
-rw-r--r--net/llc/af_llc.c64
-rw-r--r--net/llc/llc_conn.c143
-rw-r--r--net/llc/llc_core.c53
-rw-r--r--net/llc/llc_output.c45
-rw-r--r--net/llc/llc_proc.c69
-rw-r--r--net/llc/llc_sap.c111
-rw-r--r--net/mac80211/Kconfig12
-rw-r--r--net/mac80211/Makefile4
-rw-r--r--net/mac80211/agg-rx.c15
-rw-r--r--net/mac80211/agg-tx.c38
-rw-r--r--net/mac80211/cfg.c186
-rw-r--r--net/mac80211/debugfs.c127
-rw-r--r--net/mac80211/debugfs_key.c2
-rw-r--r--net/mac80211/debugfs_netdev.c212
-rw-r--r--net/mac80211/debugfs_netdev.h9
-rw-r--r--net/mac80211/debugfs_sta.c88
-rw-r--r--net/mac80211/driver-ops.h169
-rw-r--r--net/mac80211/driver-trace.h174
-rw-r--r--net/mac80211/ht.c53
-rw-r--r--net/mac80211/ibss.c125
-rw-r--r--net/mac80211/ieee80211_i.h209
-rw-r--r--net/mac80211/iface.c108
-rw-r--r--net/mac80211/key.c10
-rw-r--r--net/mac80211/key.h8
-rw-r--r--net/mac80211/main.c68
-rw-r--r--net/mac80211/mesh.c6
-rw-r--r--net/mac80211/mesh_hwmp.c20
-rw-r--r--net/mac80211/mesh_pathtbl.c6
-rw-r--r--net/mac80211/mesh_plink.c23
-rw-r--r--net/mac80211/mlme.c1282
-rw-r--r--net/mac80211/offchannel.c170
-rw-r--r--net/mac80211/pm.c18
-rw-r--r--net/mac80211/rate.c90
-rw-r--r--net/mac80211/rate.h14
-rw-r--r--net/mac80211/rc80211_pid_algo.c8
-rw-r--r--net/mac80211/rx.c333
-rw-r--r--net/mac80211/scan.c249
-rw-r--r--net/mac80211/spectmgmt.c4
-rw-r--r--net/mac80211/sta_info.c777
-rw-r--r--net/mac80211/sta_info.h68
-rw-r--r--net/mac80211/status.c100
-rw-r--r--net/mac80211/tkip.c47
-rw-r--r--net/mac80211/tx.c376
-rw-r--r--net/mac80211/util.c319
-rw-r--r--net/mac80211/wep.c17
-rw-r--r--net/mac80211/wme.c2
-rw-r--r--net/mac80211/work.c1099
-rw-r--r--net/mac80211/wpa.c57
-rw-r--r--net/netfilter/Kconfig25
-rw-r--r--net/netfilter/Makefile1
-rw-r--r--net/netfilter/ipvs/Kconfig11
-rw-r--r--net/netfilter/ipvs/Makefile1
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c42
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c67
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c13
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_proto.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_sctp.c1183
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c14
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c10
-rw-r--r--net/netfilter/nf_conntrack_core.c164
-rw-r--r--net/netfilter/nf_conntrack_expect.c31
-rw-r--r--net/netfilter/nf_conntrack_extend.c1
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c3
-rw-r--r--net/netfilter/nf_conntrack_helper.c44
-rw-r--r--net/netfilter/nf_conntrack_netlink.c230
-rw-r--r--net/netfilter/nf_conntrack_pptp.c14
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c5
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c4
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c6
-rw-r--r--net/netfilter/nf_conntrack_proto_udplite.c4
-rw-r--r--net/netfilter/nf_conntrack_sip.c330
-rw-r--r--net/netfilter/nf_conntrack_standalone.c6
-rw-r--r--net/netfilter/nf_queue.c2
-rw-r--r--net/netfilter/nfnetlink.c65
-rw-r--r--net/netfilter/nfnetlink_log.c3
-rw-r--r--net/netfilter/nfnetlink_queue.c7
-rw-r--r--net/netfilter/x_tables.c80
-rw-r--r--net/netfilter/xt_CT.c164
-rw-r--r--net/netfilter/xt_NFQUEUE.c6
-rw-r--r--net/netfilter/xt_RATEEST.c7
-rw-r--r--net/netfilter/xt_TCPMSS.c30
-rw-r--r--net/netfilter/xt_connlimit.c27
-rw-r--r--net/netfilter/xt_hashlimit.c215
-rw-r--r--net/netfilter/xt_limit.c4
-rw-r--r--net/netfilter/xt_osf.c4
-rw-r--r--net/netfilter/xt_recent.c168
-rw-r--r--net/netfilter/xt_repldata.h35
-rw-r--r--net/netlabel/netlabel_domainhash.c1
-rw-r--r--net/netlabel/netlabel_unlabeled.c3
-rw-r--r--net/netlink/genetlink.c4
-rw-r--r--net/netrom/af_netrom.c21
-rw-r--r--net/netrom/nr_route.c53
-rw-r--r--net/packet/Kconfig10
-rw-r--r--net/packet/af_packet.c298
-rw-r--r--net/phonet/datagram.c6
-rw-r--r--net/phonet/pep-gprs.c4
-rw-r--r--net/phonet/pep.c29
-rw-r--r--net/phonet/pn_dev.c4
-rw-r--r--net/rds/tcp_connect.c7
-rw-r--r--net/rds/tcp_listen.c6
-rw-r--r--net/rds/tcp_send.c4
-rw-r--r--net/rose/af_rose.c22
-rw-r--r--net/sched/sch_api.c1
-rw-r--r--net/sched/sch_fifo.c34
-rw-r--r--net/sctp/bind_addr.c1
-rw-r--r--net/sctp/ipv6.c1
-rw-r--r--net/sctp/proc.c4
-rw-r--r--net/sctp/protocol.c6
-rw-r--r--net/sctp/socket.c2
-rw-r--r--net/sunrpc/rpc_pipe.c2
-rw-r--r--net/sysctl_net.c4
-rw-r--r--net/tipc/Kconfig75
-rw-r--r--net/tipc/core.c10
-rw-r--r--net/unix/af_unix.c6
-rw-r--r--net/unix/sysctl_net_unix.c2
-rw-r--r--net/wimax/op-msg.c3
-rw-r--r--net/wimax/op-reset.c3
-rw-r--r--net/wimax/op-rfkill.c3
-rw-r--r--net/wimax/op-state-get.c3
-rw-r--r--net/wimax/stack.c3
-rw-r--r--net/wireless/.gitignore1
-rw-r--r--net/wireless/Kconfig13
-rw-r--r--net/wireless/Makefile6
-rw-r--r--net/wireless/chan.c41
-rw-r--r--net/wireless/core.c39
-rw-r--r--net/wireless/core.h11
-rw-r--r--net/wireless/db.txt17
-rw-r--r--net/wireless/genregdb.awk118
-rw-r--r--net/wireless/lib80211_crypt_ccmp.c2
-rw-r--r--net/wireless/lib80211_crypt_tkip.c23
-rw-r--r--net/wireless/mlme.c48
-rw-r--r--net/wireless/nl80211.c426
-rw-r--r--net/wireless/nl80211.h15
-rw-r--r--net/wireless/radiotap.c305
-rw-r--r--net/wireless/reg.c687
-rw-r--r--net/wireless/reg.h29
-rw-r--r--net/wireless/regdb.h7
-rw-r--r--net/wireless/scan.c158
-rw-r--r--net/wireless/sme.c41
-rw-r--r--net/wireless/sysfs.c20
-rw-r--r--net/wireless/util.c137
-rw-r--r--net/wireless/wext-compat.c39
-rw-r--r--net/wireless/wext-proc.c4
-rw-r--r--net/x25/af_x25.c24
-rw-r--r--net/x25/x25_proc.c114
-rw-r--r--net/xfrm/xfrm_input.c2
-rw-r--r--net/xfrm/xfrm_ipcomp.c16
-rw-r--r--net/xfrm/xfrm_policy.c43
-rw-r--r--net/xfrm/xfrm_proc.c6
-rw-r--r--net/xfrm/xfrm_state.c88
-rw-r--r--net/xfrm/xfrm_sysctl.c4
-rw-r--r--net/xfrm/xfrm_user.c111
317 files changed, 15305 insertions, 8663 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 33f90e7362c..453512266ea 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -663,7 +663,7 @@ out:
663 return err; 663 return err;
664} 664}
665 665
666static int vlan_init_net(struct net *net) 666static int __net_init vlan_init_net(struct net *net)
667{ 667{
668 struct vlan_net *vn = net_generic(net, vlan_net_id); 668 struct vlan_net *vn = net_generic(net, vlan_net_id);
669 int err; 669 int err;
@@ -675,7 +675,7 @@ static int vlan_init_net(struct net *net)
675 return err; 675 return err;
676} 676}
677 677
678static void vlan_exit_net(struct net *net) 678static void __net_exit vlan_exit_net(struct net *net)
679{ 679{
680 vlan_proc_cleanup(net); 680 vlan_proc_cleanup(net);
681} 681}
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 5685296017e..6abdcac1b2e 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -61,7 +61,7 @@ struct vlan_dev_info {
61 struct proc_dir_entry *dent; 61 struct proc_dir_entry *dent;
62 unsigned long cnt_inc_headroom_on_tx; 62 unsigned long cnt_inc_headroom_on_tx;
63 unsigned long cnt_encap_on_xmit; 63 unsigned long cnt_encap_on_xmit;
64 struct vlan_rx_stats *vlan_rx_stats; 64 struct vlan_rx_stats __percpu *vlan_rx_stats;
65}; 65};
66 66
67static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev) 67static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index e75a2f3b10a..c0316e0ca6e 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -14,6 +14,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
14 if (skb_bond_should_drop(skb)) 14 if (skb_bond_should_drop(skb))
15 goto drop; 15 goto drop;
16 16
17 skb->skb_iif = skb->dev->ifindex;
17 __vlan_hwaccel_put_tag(skb, vlan_tci); 18 __vlan_hwaccel_put_tag(skb, vlan_tci);
18 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); 19 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
19 20
@@ -85,6 +86,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
85 if (skb_bond_should_drop(skb)) 86 if (skb_bond_should_drop(skb))
86 goto drop; 87 goto drop;
87 88
89 skb->skb_iif = skb->dev->ifindex;
88 __vlan_hwaccel_put_tag(skb, vlan_tci); 90 __vlan_hwaccel_put_tag(skb, vlan_tci);
89 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); 91 skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
90 92
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index c1b92cab46c..9e83272fc5b 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -263,11 +263,10 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
263 vhdr->h_vlan_TCI = htons(vlan_tci); 263 vhdr->h_vlan_TCI = htons(vlan_tci);
264 264
265 /* 265 /*
266 * Set the protocol type. For a packet of type ETH_P_802_3 we 266 * Set the protocol type. For a packet of type ETH_P_802_3/2 we
267 * put the length in here instead. It is up to the 802.2 267 * put the length in here instead.
268 * layer to carry protocol information.
269 */ 268 */
270 if (type != ETH_P_802_3) 269 if (type != ETH_P_802_3 && type != ETH_P_802_2)
271 vhdr->h_vlan_encapsulated_proto = htons(type); 270 vhdr->h_vlan_encapsulated_proto = htons(type);
272 else 271 else
273 vhdr->h_vlan_encapsulated_proto = htons(len); 272 vhdr->h_vlan_encapsulated_proto = htons(len);
@@ -323,7 +322,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
323 } 322 }
324 323
325 324
326 skb->dev = vlan_dev_info(dev)->real_dev; 325 skb_set_dev(skb, vlan_dev_info(dev)->real_dev);
327 len = skb->len; 326 len = skb->len;
328 ret = dev_queue_xmit(skb); 327 ret = dev_queue_xmit(skb);
329 328
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 9ec1f057c03..afead353e21 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -140,7 +140,7 @@ void vlan_proc_cleanup(struct net *net)
140 * Create /proc/net/vlan entries 140 * Create /proc/net/vlan entries
141 */ 141 */
142 142
143int vlan_proc_init(struct net *net) 143int __net_init vlan_proc_init(struct net *net)
144{ 144{
145 struct vlan_net *vn = net_generic(net, vlan_net_id); 145 struct vlan_net *vn = net_generic(net, vlan_net_id);
146 146
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index 80caad1a31a..6ef0e761e5d 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -144,40 +144,16 @@ out:
144 return 0; 144 return 0;
145} 145}
146 146
147static __inline__ struct sock *atalk_get_socket_idx(loff_t pos)
148{
149 struct sock *s;
150 struct hlist_node *node;
151
152 sk_for_each(s, node, &atalk_sockets)
153 if (!pos--)
154 goto found;
155 s = NULL;
156found:
157 return s;
158}
159
160static void *atalk_seq_socket_start(struct seq_file *seq, loff_t *pos) 147static void *atalk_seq_socket_start(struct seq_file *seq, loff_t *pos)
161 __acquires(atalk_sockets_lock) 148 __acquires(atalk_sockets_lock)
162{ 149{
163 loff_t l = *pos;
164
165 read_lock_bh(&atalk_sockets_lock); 150 read_lock_bh(&atalk_sockets_lock);
166 return l ? atalk_get_socket_idx(--l) : SEQ_START_TOKEN; 151 return seq_hlist_start_head(&atalk_sockets, *pos);
167} 152}
168 153
169static void *atalk_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos) 154static void *atalk_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
170{ 155{
171 struct sock *i; 156 return seq_hlist_next(v, &atalk_sockets, pos);
172
173 ++*pos;
174 if (v == SEQ_START_TOKEN) {
175 i = sk_head(&atalk_sockets);
176 goto out;
177 }
178 i = sk_next(v);
179out:
180 return i;
181} 157}
182 158
183static void atalk_seq_socket_stop(struct seq_file *seq, void *v) 159static void atalk_seq_socket_stop(struct seq_file *seq, void *v)
@@ -197,7 +173,7 @@ static int atalk_seq_socket_show(struct seq_file *seq, void *v)
197 goto out; 173 goto out;
198 } 174 }
199 175
200 s = v; 176 s = sk_entry(v);
201 at = at_sk(s); 177 at = at_sk(s);
202 178
203 seq_printf(seq, "%02X %04X:%02X:%02X %04X:%02X:%02X %08X:%08X " 179 seq_printf(seq, "%02X %04X:%02X:%02X %04X:%02X:%02X %08X:%08X "
diff --git a/net/atm/addr.c b/net/atm/addr.c
index 82e85abc303..cf3ae8b4757 100644
--- a/net/atm/addr.c
+++ b/net/atm/addr.c
@@ -4,7 +4,7 @@
4 4
5#include <linux/atm.h> 5#include <linux/atm.h>
6#include <linux/atmdev.h> 6#include <linux/atmdev.h>
7#include <asm/uaccess.h> 7#include <linux/uaccess.h>
8 8
9#include "signaling.h" 9#include "signaling.h"
10#include "addr.h" 10#include "addr.h"
diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
index 02cc7e71efe..fc63526d869 100644
--- a/net/atm/atm_misc.c
+++ b/net/atm/atm_misc.c
@@ -2,37 +2,35 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL ICA */
4 4
5
6#include <linux/module.h> 5#include <linux/module.h>
7#include <linux/atm.h> 6#include <linux/atm.h>
8#include <linux/atmdev.h> 7#include <linux/atmdev.h>
9#include <linux/skbuff.h> 8#include <linux/skbuff.h>
10#include <linux/sonet.h> 9#include <linux/sonet.h>
11#include <linux/bitops.h> 10#include <linux/bitops.h>
11#include <linux/errno.h>
12#include <asm/atomic.h> 12#include <asm/atomic.h>
13#include <asm/errno.h>
14
15 13
16int atm_charge(struct atm_vcc *vcc,int truesize) 14int atm_charge(struct atm_vcc *vcc, int truesize)
17{ 15{
18 atm_force_charge(vcc,truesize); 16 atm_force_charge(vcc, truesize);
19 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf) 17 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
20 return 1; 18 return 1;
21 atm_return(vcc,truesize); 19 atm_return(vcc, truesize);
22 atomic_inc(&vcc->stats->rx_drop); 20 atomic_inc(&vcc->stats->rx_drop);
23 return 0; 21 return 0;
24} 22}
23EXPORT_SYMBOL(atm_charge);
25 24
26 25struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
27struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, 26 gfp_t gfp_flags)
28 gfp_t gfp_flags)
29{ 27{
30 struct sock *sk = sk_atm(vcc); 28 struct sock *sk = sk_atm(vcc);
31 int guess = atm_guess_pdu2truesize(pdu_size); 29 int guess = atm_guess_pdu2truesize(pdu_size);
32 30
33 atm_force_charge(vcc,guess); 31 atm_force_charge(vcc, guess);
34 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { 32 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
35 struct sk_buff *skb = alloc_skb(pdu_size,gfp_flags); 33 struct sk_buff *skb = alloc_skb(pdu_size, gfp_flags);
36 34
37 if (skb) { 35 if (skb) {
38 atomic_add(skb->truesize-guess, 36 atomic_add(skb->truesize-guess,
@@ -40,10 +38,11 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
40 return skb; 38 return skb;
41 } 39 }
42 } 40 }
43 atm_return(vcc,guess); 41 atm_return(vcc, guess);
44 atomic_inc(&vcc->stats->rx_drop); 42 atomic_inc(&vcc->stats->rx_drop);
45 return NULL; 43 return NULL;
46} 44}
45EXPORT_SYMBOL(atm_alloc_charge);
47 46
48 47
49/* 48/*
@@ -73,7 +72,6 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
73 * else * 72 * else *
74 */ 73 */
75 74
76
77int atm_pcr_goal(const struct atm_trafprm *tp) 75int atm_pcr_goal(const struct atm_trafprm *tp)
78{ 76{
79 if (tp->pcr && tp->pcr != ATM_MAX_PCR) 77 if (tp->pcr && tp->pcr != ATM_MAX_PCR)
@@ -84,26 +82,20 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
84 return -tp->max_pcr; 82 return -tp->max_pcr;
85 return 0; 83 return 0;
86} 84}
85EXPORT_SYMBOL(atm_pcr_goal);
87 86
88 87void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
89void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
90{ 88{
91#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i) 89#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
92 __SONET_ITEMS 90 __SONET_ITEMS
93#undef __HANDLE_ITEM 91#undef __HANDLE_ITEM
94} 92}
93EXPORT_SYMBOL(sonet_copy_stats);
95 94
96 95void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
97void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
98{ 96{
99#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i) 97#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100 __SONET_ITEMS 98 __SONET_ITEMS
101#undef __HANDLE_ITEM 99#undef __HANDLE_ITEM
102} 100}
103
104
105EXPORT_SYMBOL(atm_charge);
106EXPORT_SYMBOL(atm_alloc_charge);
107EXPORT_SYMBOL(atm_pcr_goal);
108EXPORT_SYMBOL(sonet_copy_stats);
109EXPORT_SYMBOL(sonet_subtract_stats); 101EXPORT_SYMBOL(sonet_subtract_stats);
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index b5674dc2083..f693b78eb46 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -42,13 +42,14 @@ static ssize_t show_atmaddress(struct device *cdev,
42 42
43 spin_lock_irqsave(&adev->lock, flags); 43 spin_lock_irqsave(&adev->lock, flags);
44 list_for_each_entry(aaddr, &adev->local, entry) { 44 list_for_each_entry(aaddr, &adev->local, entry) {
45 for(i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) { 45 for (i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) {
46 if (j == *fmt) { 46 if (j == *fmt) {
47 pos += sprintf(pos, "."); 47 pos += sprintf(pos, ".");
48 ++fmt; 48 ++fmt;
49 j = 0; 49 j = 0;
50 } 50 }
51 pos += sprintf(pos, "%02x", aaddr->addr.sas_addr.prv[i]); 51 pos += sprintf(pos, "%02x",
52 aaddr->addr.sas_addr.prv[i]);
52 } 53 }
53 pos += sprintf(pos, "\n"); 54 pos += sprintf(pos, "\n");
54 } 55 }
@@ -78,17 +79,17 @@ static ssize_t show_link_rate(struct device *cdev,
78 79
79 /* show the link rate, not the data rate */ 80 /* show the link rate, not the data rate */
80 switch (adev->link_rate) { 81 switch (adev->link_rate) {
81 case ATM_OC3_PCR: 82 case ATM_OC3_PCR:
82 link_rate = 155520000; 83 link_rate = 155520000;
83 break; 84 break;
84 case ATM_OC12_PCR: 85 case ATM_OC12_PCR:
85 link_rate = 622080000; 86 link_rate = 622080000;
86 break; 87 break;
87 case ATM_25_PCR: 88 case ATM_25_PCR:
88 link_rate = 25600000; 89 link_rate = 25600000;
89 break; 90 break;
90 default: 91 default:
91 link_rate = adev->link_rate * 8 * 53; 92 link_rate = adev->link_rate * 8 * 53;
92 } 93 }
93 pos += sprintf(pos, "%d\n", link_rate); 94 pos += sprintf(pos, "%d\n", link_rate);
94 95
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index c9230c39869..4d64d87e757 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -6,6 +6,8 @@
6 * Eric Kinzie, 2006-2007, US Naval Research Laboratory 6 * Eric Kinzie, 2006-2007, US Naval Research Laboratory
7 */ 7 */
8 8
9#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
10
9#include <linux/module.h> 11#include <linux/module.h>
10#include <linux/init.h> 12#include <linux/init.h>
11#include <linux/kernel.h> 13#include <linux/kernel.h>
@@ -15,7 +17,7 @@
15#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
16#include <linux/rtnetlink.h> 18#include <linux/rtnetlink.h>
17#include <linux/ip.h> 19#include <linux/ip.h>
18#include <asm/uaccess.h> 20#include <linux/uaccess.h>
19#include <net/arp.h> 21#include <net/arp.h>
20#include <linux/atm.h> 22#include <linux/atm.h>
21#include <linux/atmdev.h> 23#include <linux/atmdev.h>
@@ -26,20 +28,14 @@
26 28
27#include "common.h" 29#include "common.h"
28 30
29#ifdef SKB_DEBUG
30static void skb_debug(const struct sk_buff *skb) 31static void skb_debug(const struct sk_buff *skb)
31{ 32{
33#ifdef SKB_DEBUG
32#define NUM2PRINT 50 34#define NUM2PRINT 50
33 char buf[NUM2PRINT * 3 + 1]; /* 3 chars per byte */ 35 print_hex_dump(KERN_DEBUG, "br2684: skb: ", DUMP_OFFSET,
34 int i = 0; 36 16, 1, skb->data, min(NUM2PRINT, skb->len), true);
35 for (i = 0; i < skb->len && i < NUM2PRINT; i++) {
36 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
37 }
38 printk(KERN_DEBUG "br2684: skb: %s\n", buf);
39}
40#else
41#define skb_debug(skb) do {} while (0)
42#endif 37#endif
38}
43 39
44#define BR2684_ETHERTYPE_LEN 2 40#define BR2684_ETHERTYPE_LEN 2
45#define BR2684_PAD_LEN 2 41#define BR2684_PAD_LEN 2
@@ -68,7 +64,7 @@ struct br2684_vcc {
68 struct atm_vcc *atmvcc; 64 struct atm_vcc *atmvcc;
69 struct net_device *device; 65 struct net_device *device;
70 /* keep old push, pop functions for chaining */ 66 /* keep old push, pop functions for chaining */
71 void (*old_push) (struct atm_vcc * vcc, struct sk_buff * skb); 67 void (*old_push)(struct atm_vcc *vcc, struct sk_buff *skb);
72 void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb); 68 void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb);
73 enum br2684_encaps encaps; 69 enum br2684_encaps encaps;
74 struct list_head brvccs; 70 struct list_head brvccs;
@@ -148,7 +144,7 @@ static void br2684_pop(struct atm_vcc *vcc, struct sk_buff *skb)
148 struct br2684_vcc *brvcc = BR2684_VCC(vcc); 144 struct br2684_vcc *brvcc = BR2684_VCC(vcc);
149 struct net_device *net_dev = skb->dev; 145 struct net_device *net_dev = skb->dev;
150 146
151 pr_debug("br2684_pop(vcc %p ; net_dev %p )\n", vcc, net_dev); 147 pr_debug("(vcc %p ; net_dev %p )\n", vcc, net_dev);
152 brvcc->old_pop(vcc, skb); 148 brvcc->old_pop(vcc, skb);
153 149
154 if (!net_dev) 150 if (!net_dev)
@@ -244,7 +240,7 @@ static netdev_tx_t br2684_start_xmit(struct sk_buff *skb,
244 struct br2684_dev *brdev = BRPRIV(dev); 240 struct br2684_dev *brdev = BRPRIV(dev);
245 struct br2684_vcc *brvcc; 241 struct br2684_vcc *brvcc;
246 242
247 pr_debug("br2684_start_xmit, skb_dst(skb)=%p\n", skb_dst(skb)); 243 pr_debug("skb_dst(skb)=%p\n", skb_dst(skb));
248 read_lock(&devs_lock); 244 read_lock(&devs_lock);
249 brvcc = pick_outgoing_vcc(skb, brdev); 245 brvcc = pick_outgoing_vcc(skb, brdev);
250 if (brvcc == NULL) { 246 if (brvcc == NULL) {
@@ -300,7 +296,8 @@ static int br2684_setfilt(struct atm_vcc *atmvcc, void __user * arg)
300 struct br2684_dev *brdev; 296 struct br2684_dev *brdev;
301 read_lock(&devs_lock); 297 read_lock(&devs_lock);
302 brdev = BRPRIV(br2684_find_dev(&fs.ifspec)); 298 brdev = BRPRIV(br2684_find_dev(&fs.ifspec));
303 if (brdev == NULL || list_empty(&brdev->brvccs) || brdev->brvccs.next != brdev->brvccs.prev) /* >1 VCC */ 299 if (brdev == NULL || list_empty(&brdev->brvccs) ||
300 brdev->brvccs.next != brdev->brvccs.prev) /* >1 VCC */
304 brvcc = NULL; 301 brvcc = NULL;
305 else 302 else
306 brvcc = list_entry_brvcc(brdev->brvccs.next); 303 brvcc = list_entry_brvcc(brdev->brvccs.next);
@@ -352,7 +349,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
352 struct net_device *net_dev = brvcc->device; 349 struct net_device *net_dev = brvcc->device;
353 struct br2684_dev *brdev = BRPRIV(net_dev); 350 struct br2684_dev *brdev = BRPRIV(net_dev);
354 351
355 pr_debug("br2684_push\n"); 352 pr_debug("\n");
356 353
357 if (unlikely(skb == NULL)) { 354 if (unlikely(skb == NULL)) {
358 /* skb==NULL means VCC is being destroyed */ 355 /* skb==NULL means VCC is being destroyed */
@@ -376,29 +373,25 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
376 __skb_trim(skb, skb->len - 4); 373 __skb_trim(skb, skb->len - 4);
377 374
378 /* accept packets that have "ipv[46]" in the snap header */ 375 /* accept packets that have "ipv[46]" in the snap header */
379 if ((skb->len >= (sizeof(llc_oui_ipv4))) 376 if ((skb->len >= (sizeof(llc_oui_ipv4))) &&
380 && 377 (memcmp(skb->data, llc_oui_ipv4,
381 (memcmp 378 sizeof(llc_oui_ipv4) - BR2684_ETHERTYPE_LEN) == 0)) {
382 (skb->data, llc_oui_ipv4, 379 if (memcmp(skb->data + 6, ethertype_ipv6,
383 sizeof(llc_oui_ipv4) - BR2684_ETHERTYPE_LEN) == 0)) { 380 sizeof(ethertype_ipv6)) == 0)
384 if (memcmp
385 (skb->data + 6, ethertype_ipv6,
386 sizeof(ethertype_ipv6)) == 0)
387 skb->protocol = htons(ETH_P_IPV6); 381 skb->protocol = htons(ETH_P_IPV6);
388 else if (memcmp 382 else if (memcmp(skb->data + 6, ethertype_ipv4,
389 (skb->data + 6, ethertype_ipv4, 383 sizeof(ethertype_ipv4)) == 0)
390 sizeof(ethertype_ipv4)) == 0)
391 skb->protocol = htons(ETH_P_IP); 384 skb->protocol = htons(ETH_P_IP);
392 else 385 else
393 goto error; 386 goto error;
394 skb_pull(skb, sizeof(llc_oui_ipv4)); 387 skb_pull(skb, sizeof(llc_oui_ipv4));
395 skb_reset_network_header(skb); 388 skb_reset_network_header(skb);
396 skb->pkt_type = PACKET_HOST; 389 skb->pkt_type = PACKET_HOST;
397 /* 390 /*
398 * Let us waste some time for checking the encapsulation. 391 * Let us waste some time for checking the encapsulation.
399 * Note, that only 7 char is checked so frames with a valid FCS 392 * Note, that only 7 char is checked so frames with a valid FCS
400 * are also accepted (but FCS is not checked of course). 393 * are also accepted (but FCS is not checked of course).
401 */ 394 */
402 } else if ((skb->len >= sizeof(llc_oui_pid_pad)) && 395 } else if ((skb->len >= sizeof(llc_oui_pid_pad)) &&
403 (memcmp(skb->data, llc_oui_pid_pad, 7) == 0)) { 396 (memcmp(skb->data, llc_oui_pid_pad, 7) == 0)) {
404 skb_pull(skb, sizeof(llc_oui_pid_pad)); 397 skb_pull(skb, sizeof(llc_oui_pid_pad));
@@ -479,8 +472,7 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
479 write_lock_irq(&devs_lock); 472 write_lock_irq(&devs_lock);
480 net_dev = br2684_find_dev(&be.ifspec); 473 net_dev = br2684_find_dev(&be.ifspec);
481 if (net_dev == NULL) { 474 if (net_dev == NULL) {
482 printk(KERN_ERR 475 pr_err("tried to attach to non-existant device\n");
483 "br2684: tried to attach to non-existant device\n");
484 err = -ENXIO; 476 err = -ENXIO;
485 goto error; 477 goto error;
486 } 478 }
@@ -494,17 +486,16 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
494 err = -EEXIST; 486 err = -EEXIST;
495 goto error; 487 goto error;
496 } 488 }
497 if (be.fcs_in != BR2684_FCSIN_NO || be.fcs_out != BR2684_FCSOUT_NO || 489 if (be.fcs_in != BR2684_FCSIN_NO ||
498 be.fcs_auto || be.has_vpiid || be.send_padding || (be.encaps != 490 be.fcs_out != BR2684_FCSOUT_NO ||
499 BR2684_ENCAPS_VC 491 be.fcs_auto || be.has_vpiid || be.send_padding ||
500 && be.encaps != 492 (be.encaps != BR2684_ENCAPS_VC &&
501 BR2684_ENCAPS_LLC) 493 be.encaps != BR2684_ENCAPS_LLC) ||
502 || be.min_size != 0) { 494 be.min_size != 0) {
503 err = -EINVAL; 495 err = -EINVAL;
504 goto error; 496 goto error;
505 } 497 }
506 pr_debug("br2684_regvcc vcc=%p, encaps=%d, brvcc=%p\n", atmvcc, 498 pr_debug("vcc=%p, encaps=%d, brvcc=%p\n", atmvcc, be.encaps, brvcc);
507 be.encaps, brvcc);
508 if (list_empty(&brdev->brvccs) && !brdev->mac_was_set) { 499 if (list_empty(&brdev->brvccs) && !brdev->mac_was_set) {
509 unsigned char *esi = atmvcc->dev->esi; 500 unsigned char *esi = atmvcc->dev->esi;
510 if (esi[0] | esi[1] | esi[2] | esi[3] | esi[4] | esi[5]) 501 if (esi[0] | esi[1] | esi[2] | esi[3] | esi[4] | esi[5])
@@ -541,7 +532,8 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
541 } 532 }
542 __module_get(THIS_MODULE); 533 __module_get(THIS_MODULE);
543 return 0; 534 return 0;
544 error: 535
536error:
545 write_unlock_irq(&devs_lock); 537 write_unlock_irq(&devs_lock);
546 kfree(brvcc); 538 kfree(brvcc);
547 return err; 539 return err;
@@ -587,7 +579,7 @@ static void br2684_setup_routed(struct net_device *netdev)
587 INIT_LIST_HEAD(&brdev->brvccs); 579 INIT_LIST_HEAD(&brdev->brvccs);
588} 580}
589 581
590static int br2684_create(void __user * arg) 582static int br2684_create(void __user *arg)
591{ 583{
592 int err; 584 int err;
593 struct net_device *netdev; 585 struct net_device *netdev;
@@ -595,11 +587,10 @@ static int br2684_create(void __user * arg)
595 struct atm_newif_br2684 ni; 587 struct atm_newif_br2684 ni;
596 enum br2684_payload payload; 588 enum br2684_payload payload;
597 589
598 pr_debug("br2684_create\n"); 590 pr_debug("\n");
599 591
600 if (copy_from_user(&ni, arg, sizeof ni)) { 592 if (copy_from_user(&ni, arg, sizeof ni))
601 return -EFAULT; 593 return -EFAULT;
602 }
603 594
604 if (ni.media & BR2684_FLAG_ROUTED) 595 if (ni.media & BR2684_FLAG_ROUTED)
605 payload = p_routed; 596 payload = p_routed;
@@ -607,9 +598,8 @@ static int br2684_create(void __user * arg)
607 payload = p_bridged; 598 payload = p_bridged;
608 ni.media &= 0xffff; /* strip flags */ 599 ni.media &= 0xffff; /* strip flags */
609 600
610 if (ni.media != BR2684_MEDIA_ETHERNET || ni.mtu != 1500) { 601 if (ni.media != BR2684_MEDIA_ETHERNET || ni.mtu != 1500)
611 return -EINVAL; 602 return -EINVAL;
612 }
613 603
614 netdev = alloc_netdev(sizeof(struct br2684_dev), 604 netdev = alloc_netdev(sizeof(struct br2684_dev),
615 ni.ifname[0] ? ni.ifname : "nas%d", 605 ni.ifname[0] ? ni.ifname : "nas%d",
@@ -624,7 +614,7 @@ static int br2684_create(void __user * arg)
624 /* open, stop, do_ioctl ? */ 614 /* open, stop, do_ioctl ? */
625 err = register_netdev(netdev); 615 err = register_netdev(netdev);
626 if (err < 0) { 616 if (err < 0) {
627 printk(KERN_ERR "br2684_create: register_netdev failed\n"); 617 pr_err("register_netdev failed\n");
628 free_netdev(netdev); 618 free_netdev(netdev);
629 return err; 619 return err;
630 } 620 }
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 64629c35434..ebfa022008f 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -2,6 +2,8 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
6
5#include <linux/string.h> 7#include <linux/string.h>
6#include <linux/errno.h> 8#include <linux/errno.h>
7#include <linux/kernel.h> /* for UINT_MAX */ 9#include <linux/kernel.h> /* for UINT_MAX */
@@ -30,10 +32,10 @@
30#include <linux/jhash.h> 32#include <linux/jhash.h>
31#include <net/route.h> /* for struct rtable and routing */ 33#include <net/route.h> /* for struct rtable and routing */
32#include <net/icmp.h> /* icmp_send */ 34#include <net/icmp.h> /* icmp_send */
33#include <asm/param.h> /* for HZ */ 35#include <linux/param.h> /* for HZ */
36#include <linux/uaccess.h>
34#include <asm/byteorder.h> /* for htons etc. */ 37#include <asm/byteorder.h> /* for htons etc. */
35#include <asm/system.h> /* save/restore_flags */ 38#include <asm/system.h> /* save/restore_flags */
36#include <asm/uaccess.h>
37#include <asm/atomic.h> 39#include <asm/atomic.h>
38 40
39#include "common.h" 41#include "common.h"
@@ -51,13 +53,13 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
51 struct atmarp_ctrl *ctrl; 53 struct atmarp_ctrl *ctrl;
52 struct sk_buff *skb; 54 struct sk_buff *skb;
53 55
54 pr_debug("to_atmarpd(%d)\n", type); 56 pr_debug("(%d)\n", type);
55 if (!atmarpd) 57 if (!atmarpd)
56 return -EUNATCH; 58 return -EUNATCH;
57 skb = alloc_skb(sizeof(struct atmarp_ctrl),GFP_ATOMIC); 59 skb = alloc_skb(sizeof(struct atmarp_ctrl), GFP_ATOMIC);
58 if (!skb) 60 if (!skb)
59 return -ENOMEM; 61 return -ENOMEM;
60 ctrl = (struct atmarp_ctrl *) skb_put(skb,sizeof(struct atmarp_ctrl)); 62 ctrl = (struct atmarp_ctrl *)skb_put(skb, sizeof(struct atmarp_ctrl));
61 ctrl->type = type; 63 ctrl->type = type;
62 ctrl->itf_num = itf; 64 ctrl->itf_num = itf;
63 ctrl->ip = ip; 65 ctrl->ip = ip;
@@ -71,8 +73,7 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
71 73
72static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry) 74static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry)
73{ 75{
74 pr_debug("link_vcc %p to entry %p (neigh %p)\n", clip_vcc, entry, 76 pr_debug("%p to entry %p (neigh %p)\n", clip_vcc, entry, entry->neigh);
75 entry->neigh);
76 clip_vcc->entry = entry; 77 clip_vcc->entry = entry;
77 clip_vcc->xoff = 0; /* @@@ may overrun buffer by one packet */ 78 clip_vcc->xoff = 0; /* @@@ may overrun buffer by one packet */
78 clip_vcc->next = entry->vccs; 79 clip_vcc->next = entry->vccs;
@@ -86,7 +87,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
86 struct clip_vcc **walk; 87 struct clip_vcc **walk;
87 88
88 if (!entry) { 89 if (!entry) {
89 printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n", clip_vcc); 90 pr_crit("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
90 return; 91 return;
91 } 92 }
92 netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */ 93 netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */
@@ -106,13 +107,11 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
106 error = neigh_update(entry->neigh, NULL, NUD_NONE, 107 error = neigh_update(entry->neigh, NULL, NUD_NONE,
107 NEIGH_UPDATE_F_ADMIN); 108 NEIGH_UPDATE_F_ADMIN);
108 if (error) 109 if (error)
109 printk(KERN_CRIT "unlink_clip_vcc: " 110 pr_crit("neigh_update failed with %d\n", error);
110 "neigh_update failed with %d\n", error);
111 goto out; 111 goto out;
112 } 112 }
113 printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc " 113 pr_crit("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc);
114 "0x%p)\n", entry, clip_vcc); 114out:
115 out:
116 netif_tx_unlock_bh(entry->neigh->dev); 115 netif_tx_unlock_bh(entry->neigh->dev);
117} 116}
118 117
@@ -127,7 +126,7 @@ static int neigh_check_cb(struct neighbour *n)
127 126
128 if (cv->idle_timeout && time_after(jiffies, exp)) { 127 if (cv->idle_timeout && time_after(jiffies, exp)) {
129 pr_debug("releasing vcc %p->%p of entry %p\n", 128 pr_debug("releasing vcc %p->%p of entry %p\n",
130 cv, cv->vcc, entry); 129 cv, cv->vcc, entry);
131 vcc_release_async(cv->vcc, -ETIMEDOUT); 130 vcc_release_async(cv->vcc, -ETIMEDOUT);
132 } 131 }
133 } 132 }
@@ -139,7 +138,7 @@ static int neigh_check_cb(struct neighbour *n)
139 struct sk_buff *skb; 138 struct sk_buff *skb;
140 139
141 pr_debug("destruction postponed with ref %d\n", 140 pr_debug("destruction postponed with ref %d\n",
142 atomic_read(&n->refcnt)); 141 atomic_read(&n->refcnt));
143 142
144 while ((skb = skb_dequeue(&n->arp_queue)) != NULL) 143 while ((skb = skb_dequeue(&n->arp_queue)) != NULL)
145 dev_kfree_skb(skb); 144 dev_kfree_skb(skb);
@@ -163,7 +162,7 @@ static int clip_arp_rcv(struct sk_buff *skb)
163{ 162{
164 struct atm_vcc *vcc; 163 struct atm_vcc *vcc;
165 164
166 pr_debug("clip_arp_rcv\n"); 165 pr_debug("\n");
167 vcc = ATM_SKB(skb)->vcc; 166 vcc = ATM_SKB(skb)->vcc;
168 if (!vcc || !atm_charge(vcc, skb->truesize)) { 167 if (!vcc || !atm_charge(vcc, skb->truesize)) {
169 dev_kfree_skb_any(skb); 168 dev_kfree_skb_any(skb);
@@ -188,7 +187,7 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
188{ 187{
189 struct clip_vcc *clip_vcc = CLIP_VCC(vcc); 188 struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
190 189
191 pr_debug("clip push\n"); 190 pr_debug("\n");
192 if (!skb) { 191 if (!skb) {
193 pr_debug("removing VCC %p\n", clip_vcc); 192 pr_debug("removing VCC %p\n", clip_vcc);
194 if (clip_vcc->entry) 193 if (clip_vcc->entry)
@@ -206,12 +205,12 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
206 } 205 }
207 ATM_SKB(skb)->vcc = vcc; 206 ATM_SKB(skb)->vcc = vcc;
208 skb_reset_mac_header(skb); 207 skb_reset_mac_header(skb);
209 if (!clip_vcc->encap 208 if (!clip_vcc->encap ||
210 || skb->len < RFC1483LLC_LEN 209 skb->len < RFC1483LLC_LEN ||
211 || memcmp(skb->data, llc_oui, sizeof (llc_oui))) 210 memcmp(skb->data, llc_oui, sizeof(llc_oui)))
212 skb->protocol = htons(ETH_P_IP); 211 skb->protocol = htons(ETH_P_IP);
213 else { 212 else {
214 skb->protocol = ((__be16 *) skb->data)[3]; 213 skb->protocol = ((__be16 *)skb->data)[3];
215 skb_pull(skb, RFC1483LLC_LEN); 214 skb_pull(skb, RFC1483LLC_LEN);
216 if (skb->protocol == htons(ETH_P_ARP)) { 215 if (skb->protocol == htons(ETH_P_ARP)) {
217 skb->dev->stats.rx_packets++; 216 skb->dev->stats.rx_packets++;
@@ -239,7 +238,7 @@ static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb)
239 int old; 238 int old;
240 unsigned long flags; 239 unsigned long flags;
241 240
242 pr_debug("clip_pop(vcc %p)\n", vcc); 241 pr_debug("(vcc %p)\n", vcc);
243 clip_vcc->old_pop(vcc, skb); 242 clip_vcc->old_pop(vcc, skb);
244 /* skb->dev == NULL in outbound ARP packets */ 243 /* skb->dev == NULL in outbound ARP packets */
245 if (!dev) 244 if (!dev)
@@ -255,7 +254,7 @@ static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb)
255 254
256static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb) 255static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb)
257{ 256{
258 pr_debug("clip_neigh_solicit (neigh %p, skb %p)\n", neigh, skb); 257 pr_debug("(neigh %p, skb %p)\n", neigh, skb);
259 to_atmarpd(act_need, PRIV(neigh->dev)->number, NEIGH2ENTRY(neigh)->ip); 258 to_atmarpd(act_need, PRIV(neigh->dev)->number, NEIGH2ENTRY(neigh)->ip);
260} 259}
261 260
@@ -284,7 +283,7 @@ static int clip_constructor(struct neighbour *neigh)
284 struct in_device *in_dev; 283 struct in_device *in_dev;
285 struct neigh_parms *parms; 284 struct neigh_parms *parms;
286 285
287 pr_debug("clip_constructor (neigh %p, entry %p)\n", neigh, entry); 286 pr_debug("(neigh %p, entry %p)\n", neigh, entry);
288 neigh->type = inet_addr_type(&init_net, entry->ip); 287 neigh->type = inet_addr_type(&init_net, entry->ip);
289 if (neigh->type != RTN_UNICAST) 288 if (neigh->type != RTN_UNICAST)
290 return -EINVAL; 289 return -EINVAL;
@@ -369,9 +368,9 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
369 int old; 368 int old;
370 unsigned long flags; 369 unsigned long flags;
371 370
372 pr_debug("clip_start_xmit (skb %p)\n", skb); 371 pr_debug("(skb %p)\n", skb);
373 if (!skb_dst(skb)) { 372 if (!skb_dst(skb)) {
374 printk(KERN_ERR "clip_start_xmit: skb_dst(skb) == NULL\n"); 373 pr_err("skb_dst(skb) == NULL\n");
375 dev_kfree_skb(skb); 374 dev_kfree_skb(skb);
376 dev->stats.tx_dropped++; 375 dev->stats.tx_dropped++;
377 return NETDEV_TX_OK; 376 return NETDEV_TX_OK;
@@ -385,7 +384,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
385 return 0; 384 return 0;
386 } 385 }
387#endif 386#endif
388 printk(KERN_ERR "clip_start_xmit: NO NEIGHBOUR !\n"); 387 pr_err("NO NEIGHBOUR !\n");
389 dev_kfree_skb(skb); 388 dev_kfree_skb(skb);
390 dev->stats.tx_dropped++; 389 dev->stats.tx_dropped++;
391 return NETDEV_TX_OK; 390 return NETDEV_TX_OK;
@@ -421,7 +420,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
421 pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev); 420 pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
422 old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */ 421 old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
423 if (old) { 422 if (old) {
424 printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n"); 423 pr_warning("XOFF->XOFF transition\n");
425 return NETDEV_TX_OK; 424 return NETDEV_TX_OK;
426 } 425 }
427 dev->stats.tx_packets++; 426 dev->stats.tx_packets++;
@@ -456,7 +455,7 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout)
456 clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL); 455 clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL);
457 if (!clip_vcc) 456 if (!clip_vcc)
458 return -ENOMEM; 457 return -ENOMEM;
459 pr_debug("mkip clip_vcc %p vcc %p\n", clip_vcc, vcc); 458 pr_debug("%p vcc %p\n", clip_vcc, vcc);
460 clip_vcc->vcc = vcc; 459 clip_vcc->vcc = vcc;
461 vcc->user_back = clip_vcc; 460 vcc->user_back = clip_vcc;
462 set_bit(ATM_VF_IS_CLIP, &vcc->flags); 461 set_bit(ATM_VF_IS_CLIP, &vcc->flags);
@@ -506,16 +505,16 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
506 struct rtable *rt; 505 struct rtable *rt;
507 506
508 if (vcc->push != clip_push) { 507 if (vcc->push != clip_push) {
509 printk(KERN_WARNING "clip_setentry: non-CLIP VCC\n"); 508 pr_warning("non-CLIP VCC\n");
510 return -EBADF; 509 return -EBADF;
511 } 510 }
512 clip_vcc = CLIP_VCC(vcc); 511 clip_vcc = CLIP_VCC(vcc);
513 if (!ip) { 512 if (!ip) {
514 if (!clip_vcc->entry) { 513 if (!clip_vcc->entry) {
515 printk(KERN_ERR "hiding hidden ATMARP entry\n"); 514 pr_err("hiding hidden ATMARP entry\n");
516 return 0; 515 return 0;
517 } 516 }
518 pr_debug("setentry: remove\n"); 517 pr_debug("remove\n");
519 unlink_clip_vcc(clip_vcc); 518 unlink_clip_vcc(clip_vcc);
520 return 0; 519 return 0;
521 } 520 }
@@ -529,9 +528,9 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
529 entry = NEIGH2ENTRY(neigh); 528 entry = NEIGH2ENTRY(neigh);
530 if (entry != clip_vcc->entry) { 529 if (entry != clip_vcc->entry) {
531 if (!clip_vcc->entry) 530 if (!clip_vcc->entry)
532 pr_debug("setentry: add\n"); 531 pr_debug("add\n");
533 else { 532 else {
534 pr_debug("setentry: update\n"); 533 pr_debug("update\n");
535 unlink_clip_vcc(clip_vcc); 534 unlink_clip_vcc(clip_vcc);
536 } 535 }
537 link_vcc(clip_vcc, entry); 536 link_vcc(clip_vcc, entry);
@@ -614,16 +613,16 @@ static int clip_device_event(struct notifier_block *this, unsigned long event,
614 613
615 switch (event) { 614 switch (event) {
616 case NETDEV_UP: 615 case NETDEV_UP:
617 pr_debug("clip_device_event NETDEV_UP\n"); 616 pr_debug("NETDEV_UP\n");
618 to_atmarpd(act_up, PRIV(dev)->number, 0); 617 to_atmarpd(act_up, PRIV(dev)->number, 0);
619 break; 618 break;
620 case NETDEV_GOING_DOWN: 619 case NETDEV_GOING_DOWN:
621 pr_debug("clip_device_event NETDEV_DOWN\n"); 620 pr_debug("NETDEV_DOWN\n");
622 to_atmarpd(act_down, PRIV(dev)->number, 0); 621 to_atmarpd(act_down, PRIV(dev)->number, 0);
623 break; 622 break;
624 case NETDEV_CHANGE: 623 case NETDEV_CHANGE:
625 case NETDEV_CHANGEMTU: 624 case NETDEV_CHANGEMTU:
626 pr_debug("clip_device_event NETDEV_CHANGE*\n"); 625 pr_debug("NETDEV_CHANGE*\n");
627 to_atmarpd(act_change, PRIV(dev)->number, 0); 626 to_atmarpd(act_change, PRIV(dev)->number, 0);
628 break; 627 break;
629 } 628 }
@@ -645,7 +644,6 @@ static int clip_inet_event(struct notifier_block *this, unsigned long event,
645 return clip_device_event(this, NETDEV_CHANGE, in_dev->dev); 644 return clip_device_event(this, NETDEV_CHANGE, in_dev->dev);
646} 645}
647 646
648
649static struct notifier_block clip_dev_notifier = { 647static struct notifier_block clip_dev_notifier = {
650 .notifier_call = clip_device_event, 648 .notifier_call = clip_device_event,
651}; 649};
@@ -660,7 +658,7 @@ static struct notifier_block clip_inet_notifier = {
660 658
661static void atmarpd_close(struct atm_vcc *vcc) 659static void atmarpd_close(struct atm_vcc *vcc)
662{ 660{
663 pr_debug("atmarpd_close\n"); 661 pr_debug("\n");
664 662
665 rtnl_lock(); 663 rtnl_lock();
666 atmarpd = NULL; 664 atmarpd = NULL;
@@ -671,7 +669,6 @@ static void atmarpd_close(struct atm_vcc *vcc)
671 module_put(THIS_MODULE); 669 module_put(THIS_MODULE);
672} 670}
673 671
674
675static struct atmdev_ops atmarpd_dev_ops = { 672static struct atmdev_ops atmarpd_dev_ops = {
676 .close = atmarpd_close 673 .close = atmarpd_close
677}; 674};
@@ -693,11 +690,11 @@ static int atm_init_atmarp(struct atm_vcc *vcc)
693 return -EADDRINUSE; 690 return -EADDRINUSE;
694 } 691 }
695 692
696 mod_timer(&idle_timer, jiffies+CLIP_CHECK_INTERVAL*HZ); 693 mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ);
697 694
698 atmarpd = vcc; 695 atmarpd = vcc;
699 set_bit(ATM_VF_META,&vcc->flags); 696 set_bit(ATM_VF_META, &vcc->flags);
700 set_bit(ATM_VF_READY,&vcc->flags); 697 set_bit(ATM_VF_READY, &vcc->flags);
701 /* allow replies and avoid getting closed if signaling dies */ 698 /* allow replies and avoid getting closed if signaling dies */
702 vcc->dev = &atmarpd_dev; 699 vcc->dev = &atmarpd_dev;
703 vcc_insert_socket(sk_atm(vcc)); 700 vcc_insert_socket(sk_atm(vcc));
@@ -950,8 +947,7 @@ static int __init atm_clip_init(void)
950 947
951 p = proc_create("arp", S_IRUGO, atm_proc_root, &arp_seq_fops); 948 p = proc_create("arp", S_IRUGO, atm_proc_root, &arp_seq_fops);
952 if (!p) { 949 if (!p) {
953 printk(KERN_ERR "Unable to initialize " 950 pr_err("Unable to initialize /proc/net/atm/arp\n");
954 "/proc/net/atm/arp\n");
955 atm_clip_exit_noproc(); 951 atm_clip_exit_noproc();
956 return -ENOMEM; 952 return -ENOMEM;
957 } 953 }
diff --git a/net/atm/common.c b/net/atm/common.c
index d61e051e0a3..74d095a081e 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -2,6 +2,7 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
5 6
6#include <linux/module.h> 7#include <linux/module.h>
7#include <linux/kmod.h> 8#include <linux/kmod.h>
@@ -18,11 +19,10 @@
18#include <linux/bitops.h> 19#include <linux/bitops.h>
19#include <linux/init.h> 20#include <linux/init.h>
20#include <net/sock.h> /* struct sock */ 21#include <net/sock.h> /* struct sock */
22#include <linux/uaccess.h>
23#include <linux/poll.h>
21 24
22#include <asm/uaccess.h>
23#include <asm/atomic.h> 25#include <asm/atomic.h>
24#include <asm/poll.h>
25
26 26
27#include "resources.h" /* atm_find_dev */ 27#include "resources.h" /* atm_find_dev */
28#include "common.h" /* prototypes */ 28#include "common.h" /* prototypes */
@@ -31,13 +31,15 @@
31#include "signaling.h" /* for WAITING and sigd_attach */ 31#include "signaling.h" /* for WAITING and sigd_attach */
32 32
33struct hlist_head vcc_hash[VCC_HTABLE_SIZE]; 33struct hlist_head vcc_hash[VCC_HTABLE_SIZE];
34EXPORT_SYMBOL(vcc_hash);
35
34DEFINE_RWLOCK(vcc_sklist_lock); 36DEFINE_RWLOCK(vcc_sklist_lock);
37EXPORT_SYMBOL(vcc_sklist_lock);
35 38
36static void __vcc_insert_socket(struct sock *sk) 39static void __vcc_insert_socket(struct sock *sk)
37{ 40{
38 struct atm_vcc *vcc = atm_sk(sk); 41 struct atm_vcc *vcc = atm_sk(sk);
39 struct hlist_head *head = &vcc_hash[vcc->vci & 42 struct hlist_head *head = &vcc_hash[vcc->vci & (VCC_HTABLE_SIZE - 1)];
40 (VCC_HTABLE_SIZE - 1)];
41 sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1); 43 sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1);
42 sk_add_node(sk, head); 44 sk_add_node(sk, head);
43} 45}
@@ -48,6 +50,7 @@ void vcc_insert_socket(struct sock *sk)
48 __vcc_insert_socket(sk); 50 __vcc_insert_socket(sk);
49 write_unlock_irq(&vcc_sklist_lock); 51 write_unlock_irq(&vcc_sklist_lock);
50} 52}
53EXPORT_SYMBOL(vcc_insert_socket);
51 54
52static void vcc_remove_socket(struct sock *sk) 55static void vcc_remove_socket(struct sock *sk)
53{ 56{
@@ -56,37 +59,32 @@ static void vcc_remove_socket(struct sock *sk)
56 write_unlock_irq(&vcc_sklist_lock); 59 write_unlock_irq(&vcc_sklist_lock);
57} 60}
58 61
59 62static struct sk_buff *alloc_tx(struct atm_vcc *vcc, unsigned int size)
60static struct sk_buff *alloc_tx(struct atm_vcc *vcc,unsigned int size)
61{ 63{
62 struct sk_buff *skb; 64 struct sk_buff *skb;
63 struct sock *sk = sk_atm(vcc); 65 struct sock *sk = sk_atm(vcc);
64 66
65 if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) { 67 if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) {
66 pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n", 68 pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n",
67 sk_wmem_alloc_get(sk), size, 69 sk_wmem_alloc_get(sk), size, sk->sk_sndbuf);
68 sk->sk_sndbuf);
69 return NULL; 70 return NULL;
70 } 71 }
71 while (!(skb = alloc_skb(size, GFP_KERNEL))) 72 while (!(skb = alloc_skb(size, GFP_KERNEL)))
72 schedule(); 73 schedule();
73 pr_debug("AlTx %d += %d\n", sk_wmem_alloc_get(sk), skb->truesize); 74 pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
74 atomic_add(skb->truesize, &sk->sk_wmem_alloc); 75 atomic_add(skb->truesize, &sk->sk_wmem_alloc);
75 return skb; 76 return skb;
76} 77}
77 78
78
79EXPORT_SYMBOL(vcc_hash);
80EXPORT_SYMBOL(vcc_sklist_lock);
81EXPORT_SYMBOL(vcc_insert_socket);
82
83static void vcc_sock_destruct(struct sock *sk) 79static void vcc_sock_destruct(struct sock *sk)
84{ 80{
85 if (atomic_read(&sk->sk_rmem_alloc)) 81 if (atomic_read(&sk->sk_rmem_alloc))
86 printk(KERN_DEBUG "vcc_sock_destruct: rmem leakage (%d bytes) detected.\n", atomic_read(&sk->sk_rmem_alloc)); 82 printk(KERN_DEBUG "%s: rmem leakage (%d bytes) detected.\n",
83 __func__, atomic_read(&sk->sk_rmem_alloc));
87 84
88 if (atomic_read(&sk->sk_wmem_alloc)) 85 if (atomic_read(&sk->sk_wmem_alloc))
89 printk(KERN_DEBUG "vcc_sock_destruct: wmem leakage (%d bytes) detected.\n", atomic_read(&sk->sk_wmem_alloc)); 86 printk(KERN_DEBUG "%s: wmem leakage (%d bytes) detected.\n",
87 __func__, atomic_read(&sk->sk_wmem_alloc));
90} 88}
91 89
92static void vcc_def_wakeup(struct sock *sk) 90static void vcc_def_wakeup(struct sock *sk)
@@ -142,8 +140,8 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
142 140
143 vcc = atm_sk(sk); 141 vcc = atm_sk(sk);
144 vcc->dev = NULL; 142 vcc->dev = NULL;
145 memset(&vcc->local,0,sizeof(struct sockaddr_atmsvc)); 143 memset(&vcc->local, 0, sizeof(struct sockaddr_atmsvc));
146 memset(&vcc->remote,0,sizeof(struct sockaddr_atmsvc)); 144 memset(&vcc->remote, 0, sizeof(struct sockaddr_atmsvc));
147 vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */ 145 vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */
148 atomic_set(&sk->sk_wmem_alloc, 1); 146 atomic_set(&sk->sk_wmem_alloc, 1);
149 atomic_set(&sk->sk_rmem_alloc, 0); 147 atomic_set(&sk->sk_rmem_alloc, 0);
@@ -156,7 +154,6 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
156 return 0; 154 return 0;
157} 155}
158 156
159
160static void vcc_destroy_socket(struct sock *sk) 157static void vcc_destroy_socket(struct sock *sk)
161{ 158{
162 struct atm_vcc *vcc = atm_sk(sk); 159 struct atm_vcc *vcc = atm_sk(sk);
@@ -171,7 +168,7 @@ static void vcc_destroy_socket(struct sock *sk)
171 vcc->push(vcc, NULL); /* atmarpd has no push */ 168 vcc->push(vcc, NULL); /* atmarpd has no push */
172 169
173 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { 170 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
174 atm_return(vcc,skb->truesize); 171 atm_return(vcc, skb->truesize);
175 kfree_skb(skb); 172 kfree_skb(skb);
176 } 173 }
177 174
@@ -182,7 +179,6 @@ static void vcc_destroy_socket(struct sock *sk)
182 vcc_remove_socket(sk); 179 vcc_remove_socket(sk);
183} 180}
184 181
185
186int vcc_release(struct socket *sock) 182int vcc_release(struct socket *sock)
187{ 183{
188 struct sock *sk = sock->sk; 184 struct sock *sk = sock->sk;
@@ -197,7 +193,6 @@ int vcc_release(struct socket *sock)
197 return 0; 193 return 0;
198} 194}
199 195
200
201void vcc_release_async(struct atm_vcc *vcc, int reply) 196void vcc_release_async(struct atm_vcc *vcc, int reply)
202{ 197{
203 struct sock *sk = sk_atm(vcc); 198 struct sock *sk = sk_atm(vcc);
@@ -208,8 +203,6 @@ void vcc_release_async(struct atm_vcc *vcc, int reply)
208 clear_bit(ATM_VF_WAITING, &vcc->flags); 203 clear_bit(ATM_VF_WAITING, &vcc->flags);
209 sk->sk_state_change(sk); 204 sk->sk_state_change(sk);
210} 205}
211
212
213EXPORT_SYMBOL(vcc_release_async); 206EXPORT_SYMBOL(vcc_release_async);
214 207
215 208
@@ -235,37 +228,37 @@ void atm_dev_release_vccs(struct atm_dev *dev)
235 write_unlock_irq(&vcc_sklist_lock); 228 write_unlock_irq(&vcc_sklist_lock);
236} 229}
237 230
238 231static int adjust_tp(struct atm_trafprm *tp, unsigned char aal)
239static int adjust_tp(struct atm_trafprm *tp,unsigned char aal)
240{ 232{
241 int max_sdu; 233 int max_sdu;
242 234
243 if (!tp->traffic_class) return 0; 235 if (!tp->traffic_class)
236 return 0;
244 switch (aal) { 237 switch (aal) {
245 case ATM_AAL0: 238 case ATM_AAL0:
246 max_sdu = ATM_CELL_SIZE-1; 239 max_sdu = ATM_CELL_SIZE-1;
247 break; 240 break;
248 case ATM_AAL34: 241 case ATM_AAL34:
249 max_sdu = ATM_MAX_AAL34_PDU; 242 max_sdu = ATM_MAX_AAL34_PDU;
250 break; 243 break;
251 default: 244 default:
252 printk(KERN_WARNING "ATM: AAL problems ... " 245 pr_warning("AAL problems ... (%d)\n", aal);
253 "(%d)\n",aal); 246 /* fall through */
254 /* fall through */ 247 case ATM_AAL5:
255 case ATM_AAL5: 248 max_sdu = ATM_MAX_AAL5_PDU;
256 max_sdu = ATM_MAX_AAL5_PDU;
257 } 249 }
258 if (!tp->max_sdu) tp->max_sdu = max_sdu; 250 if (!tp->max_sdu)
259 else if (tp->max_sdu > max_sdu) return -EINVAL; 251 tp->max_sdu = max_sdu;
260 if (!tp->max_cdv) tp->max_cdv = ATM_MAX_CDV; 252 else if (tp->max_sdu > max_sdu)
253 return -EINVAL;
254 if (!tp->max_cdv)
255 tp->max_cdv = ATM_MAX_CDV;
261 return 0; 256 return 0;
262} 257}
263 258
264
265static int check_ci(const struct atm_vcc *vcc, short vpi, int vci) 259static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
266{ 260{
267 struct hlist_head *head = &vcc_hash[vci & 261 struct hlist_head *head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];
268 (VCC_HTABLE_SIZE - 1)];
269 struct hlist_node *node; 262 struct hlist_node *node;
270 struct sock *s; 263 struct sock *s;
271 struct atm_vcc *walk; 264 struct atm_vcc *walk;
@@ -289,7 +282,6 @@ static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
289 return 0; 282 return 0;
290} 283}
291 284
292
293static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci) 285static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci)
294{ 286{
295 static short p; /* poor man's per-device cache */ 287 static short p; /* poor man's per-device cache */
@@ -327,14 +319,13 @@ static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci)
327 if ((c == ATM_NOT_RSV_VCI || *vci != ATM_VCI_ANY) && 319 if ((c == ATM_NOT_RSV_VCI || *vci != ATM_VCI_ANY) &&
328 *vpi == ATM_VPI_ANY) { 320 *vpi == ATM_VPI_ANY) {
329 p++; 321 p++;
330 if (p >= 1 << vcc->dev->ci_range.vpi_bits) p = 0; 322 if (p >= 1 << vcc->dev->ci_range.vpi_bits)
323 p = 0;
331 } 324 }
332 } 325 } while (old_p != p || old_c != c);
333 while (old_p != p || old_c != c);
334 return -EADDRINUSE; 326 return -EADDRINUSE;
335} 327}
336 328
337
338static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi, 329static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi,
339 int vci) 330 int vci)
340{ 331{
@@ -362,37 +353,46 @@ static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi,
362 __vcc_insert_socket(sk); 353 __vcc_insert_socket(sk);
363 write_unlock_irq(&vcc_sklist_lock); 354 write_unlock_irq(&vcc_sklist_lock);
364 switch (vcc->qos.aal) { 355 switch (vcc->qos.aal) {
365 case ATM_AAL0: 356 case ATM_AAL0:
366 error = atm_init_aal0(vcc); 357 error = atm_init_aal0(vcc);
367 vcc->stats = &dev->stats.aal0; 358 vcc->stats = &dev->stats.aal0;
368 break; 359 break;
369 case ATM_AAL34: 360 case ATM_AAL34:
370 error = atm_init_aal34(vcc); 361 error = atm_init_aal34(vcc);
371 vcc->stats = &dev->stats.aal34; 362 vcc->stats = &dev->stats.aal34;
372 break; 363 break;
373 case ATM_NO_AAL: 364 case ATM_NO_AAL:
374 /* ATM_AAL5 is also used in the "0 for default" case */ 365 /* ATM_AAL5 is also used in the "0 for default" case */
375 vcc->qos.aal = ATM_AAL5; 366 vcc->qos.aal = ATM_AAL5;
376 /* fall through */ 367 /* fall through */
377 case ATM_AAL5: 368 case ATM_AAL5:
378 error = atm_init_aal5(vcc); 369 error = atm_init_aal5(vcc);
379 vcc->stats = &dev->stats.aal5; 370 vcc->stats = &dev->stats.aal5;
380 break; 371 break;
381 default: 372 default:
382 error = -EPROTOTYPE; 373 error = -EPROTOTYPE;
383 } 374 }
384 if (!error) error = adjust_tp(&vcc->qos.txtp,vcc->qos.aal); 375 if (!error)
385 if (!error) error = adjust_tp(&vcc->qos.rxtp,vcc->qos.aal); 376 error = adjust_tp(&vcc->qos.txtp, vcc->qos.aal);
377 if (!error)
378 error = adjust_tp(&vcc->qos.rxtp, vcc->qos.aal);
386 if (error) 379 if (error)
387 goto fail; 380 goto fail;
388 pr_debug("VCC %d.%d, AAL %d\n",vpi,vci,vcc->qos.aal); 381 pr_debug("VCC %d.%d, AAL %d\n", vpi, vci, vcc->qos.aal);
389 pr_debug(" TX: %d, PCR %d..%d, SDU %d\n",vcc->qos.txtp.traffic_class, 382 pr_debug(" TX: %d, PCR %d..%d, SDU %d\n",
390 vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,vcc->qos.txtp.max_sdu); 383 vcc->qos.txtp.traffic_class,
391 pr_debug(" RX: %d, PCR %d..%d, SDU %d\n",vcc->qos.rxtp.traffic_class, 384 vcc->qos.txtp.min_pcr,
392 vcc->qos.rxtp.min_pcr,vcc->qos.rxtp.max_pcr,vcc->qos.rxtp.max_sdu); 385 vcc->qos.txtp.max_pcr,
386 vcc->qos.txtp.max_sdu);
387 pr_debug(" RX: %d, PCR %d..%d, SDU %d\n",
388 vcc->qos.rxtp.traffic_class,
389 vcc->qos.rxtp.min_pcr,
390 vcc->qos.rxtp.max_pcr,
391 vcc->qos.rxtp.max_sdu);
393 392
394 if (dev->ops->open) { 393 if (dev->ops->open) {
395 if ((error = dev->ops->open(vcc))) 394 error = dev->ops->open(vcc);
395 if (error)
396 goto fail; 396 goto fail;
397 } 397 }
398 return 0; 398 return 0;
@@ -406,14 +406,13 @@ fail_module_put:
406 return error; 406 return error;
407} 407}
408 408
409
410int vcc_connect(struct socket *sock, int itf, short vpi, int vci) 409int vcc_connect(struct socket *sock, int itf, short vpi, int vci)
411{ 410{
412 struct atm_dev *dev; 411 struct atm_dev *dev;
413 struct atm_vcc *vcc = ATM_SD(sock); 412 struct atm_vcc *vcc = ATM_SD(sock);
414 int error; 413 int error;
415 414
416 pr_debug("vcc_connect (vpi %d, vci %d)\n",vpi,vci); 415 pr_debug("(vpi %d, vci %d)\n", vpi, vci);
417 if (sock->state == SS_CONNECTED) 416 if (sock->state == SS_CONNECTED)
418 return -EISCONN; 417 return -EISCONN;
419 if (sock->state != SS_UNCONNECTED) 418 if (sock->state != SS_UNCONNECTED)
@@ -422,30 +421,33 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci)
422 return -EINVAL; 421 return -EINVAL;
423 422
424 if (vpi != ATM_VPI_UNSPEC && vci != ATM_VCI_UNSPEC) 423 if (vpi != ATM_VPI_UNSPEC && vci != ATM_VCI_UNSPEC)
425 clear_bit(ATM_VF_PARTIAL,&vcc->flags); 424 clear_bit(ATM_VF_PARTIAL, &vcc->flags);
426 else 425 else
427 if (test_bit(ATM_VF_PARTIAL,&vcc->flags)) 426 if (test_bit(ATM_VF_PARTIAL, &vcc->flags))
428 return -EINVAL; 427 return -EINVAL;
429 pr_debug("vcc_connect (TX: cl %d,bw %d-%d,sdu %d; " 428 pr_debug("(TX: cl %d,bw %d-%d,sdu %d; "
430 "RX: cl %d,bw %d-%d,sdu %d,AAL %s%d)\n", 429 "RX: cl %d,bw %d-%d,sdu %d,AAL %s%d)\n",
431 vcc->qos.txtp.traffic_class,vcc->qos.txtp.min_pcr, 430 vcc->qos.txtp.traffic_class, vcc->qos.txtp.min_pcr,
432 vcc->qos.txtp.max_pcr,vcc->qos.txtp.max_sdu, 431 vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_sdu,
433 vcc->qos.rxtp.traffic_class,vcc->qos.rxtp.min_pcr, 432 vcc->qos.rxtp.traffic_class, vcc->qos.rxtp.min_pcr,
434 vcc->qos.rxtp.max_pcr,vcc->qos.rxtp.max_sdu, 433 vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_sdu,
435 vcc->qos.aal == ATM_AAL5 ? "" : vcc->qos.aal == ATM_AAL0 ? "" : 434 vcc->qos.aal == ATM_AAL5 ? "" :
436 " ??? code ",vcc->qos.aal == ATM_AAL0 ? 0 : vcc->qos.aal); 435 vcc->qos.aal == ATM_AAL0 ? "" : " ??? code ",
436 vcc->qos.aal == ATM_AAL0 ? 0 : vcc->qos.aal);
437 if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) 437 if (!test_bit(ATM_VF_HASQOS, &vcc->flags))
438 return -EBADFD; 438 return -EBADFD;
439 if (vcc->qos.txtp.traffic_class == ATM_ANYCLASS || 439 if (vcc->qos.txtp.traffic_class == ATM_ANYCLASS ||
440 vcc->qos.rxtp.traffic_class == ATM_ANYCLASS) 440 vcc->qos.rxtp.traffic_class == ATM_ANYCLASS)
441 return -EINVAL; 441 return -EINVAL;
442 if (likely(itf != ATM_ITF_ANY)) { 442 if (likely(itf != ATM_ITF_ANY)) {
443 dev = try_then_request_module(atm_dev_lookup(itf), "atm-device-%d", itf); 443 dev = try_then_request_module(atm_dev_lookup(itf),
444 "atm-device-%d", itf);
444 } else { 445 } else {
445 dev = NULL; 446 dev = NULL;
446 mutex_lock(&atm_dev_mutex); 447 mutex_lock(&atm_dev_mutex);
447 if (!list_empty(&atm_devs)) { 448 if (!list_empty(&atm_devs)) {
448 dev = list_entry(atm_devs.next, struct atm_dev, dev_list); 449 dev = list_entry(atm_devs.next,
450 struct atm_dev, dev_list);
449 atm_dev_hold(dev); 451 atm_dev_hold(dev);
450 } 452 }
451 mutex_unlock(&atm_dev_mutex); 453 mutex_unlock(&atm_dev_mutex);
@@ -458,13 +460,12 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci)
458 return error; 460 return error;
459 } 461 }
460 if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) 462 if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC)
461 set_bit(ATM_VF_PARTIAL,&vcc->flags); 463 set_bit(ATM_VF_PARTIAL, &vcc->flags);
462 if (test_bit(ATM_VF_READY,&ATM_SD(sock)->flags)) 464 if (test_bit(ATM_VF_READY, &ATM_SD(sock)->flags))
463 sock->state = SS_CONNECTED; 465 sock->state = SS_CONNECTED;
464 return 0; 466 return 0;
465} 467}
466 468
467
468int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, 469int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
469 size_t size, int flags) 470 size_t size, int flags)
470{ 471{
@@ -478,8 +479,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
478 if (flags & ~MSG_DONTWAIT) /* only handle MSG_DONTWAIT */ 479 if (flags & ~MSG_DONTWAIT) /* only handle MSG_DONTWAIT */
479 return -EOPNOTSUPP; 480 return -EOPNOTSUPP;
480 vcc = ATM_SD(sock); 481 vcc = ATM_SD(sock);
481 if (test_bit(ATM_VF_RELEASED,&vcc->flags) || 482 if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
482 test_bit(ATM_VF_CLOSE,&vcc->flags) || 483 test_bit(ATM_VF_CLOSE, &vcc->flags) ||
483 !test_bit(ATM_VF_READY, &vcc->flags)) 484 !test_bit(ATM_VF_READY, &vcc->flags))
484 return 0; 485 return 0;
485 486
@@ -497,13 +498,12 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
497 if (error) 498 if (error)
498 return error; 499 return error;
499 sock_recv_ts_and_drops(msg, sk, skb); 500 sock_recv_ts_and_drops(msg, sk, skb);
500 pr_debug("RcvM %d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize); 501 pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize);
501 atm_return(vcc, skb->truesize); 502 atm_return(vcc, skb->truesize);
502 skb_free_datagram(sk, skb); 503 skb_free_datagram(sk, skb);
503 return copied; 504 return copied;
504} 505}
505 506
506
507int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, 507int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
508 size_t total_len) 508 size_t total_len)
509{ 509{
@@ -511,7 +511,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
511 DEFINE_WAIT(wait); 511 DEFINE_WAIT(wait);
512 struct atm_vcc *vcc; 512 struct atm_vcc *vcc;
513 struct sk_buff *skb; 513 struct sk_buff *skb;
514 int eff,error; 514 int eff, error;
515 const void __user *buff; 515 const void __user *buff;
516 int size; 516 int size;
517 517
@@ -550,7 +550,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
550 eff = (size+3) & ~3; /* align to word boundary */ 550 eff = (size+3) & ~3; /* align to word boundary */
551 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 551 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
552 error = 0; 552 error = 0;
553 while (!(skb = alloc_tx(vcc,eff))) { 553 while (!(skb = alloc_tx(vcc, eff))) {
554 if (m->msg_flags & MSG_DONTWAIT) { 554 if (m->msg_flags & MSG_DONTWAIT) {
555 error = -EAGAIN; 555 error = -EAGAIN;
556 break; 556 break;
@@ -560,9 +560,9 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
560 error = -ERESTARTSYS; 560 error = -ERESTARTSYS;
561 break; 561 break;
562 } 562 }
563 if (test_bit(ATM_VF_RELEASED,&vcc->flags) || 563 if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
564 test_bit(ATM_VF_CLOSE,&vcc->flags) || 564 test_bit(ATM_VF_CLOSE, &vcc->flags) ||
565 !test_bit(ATM_VF_READY,&vcc->flags)) { 565 !test_bit(ATM_VF_READY, &vcc->flags)) {
566 error = -EPIPE; 566 error = -EPIPE;
567 send_sig(SIGPIPE, current, 0); 567 send_sig(SIGPIPE, current, 0);
568 break; 568 break;
@@ -574,20 +574,20 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
574 goto out; 574 goto out;
575 skb->dev = NULL; /* for paths shared with net_device interfaces */ 575 skb->dev = NULL; /* for paths shared with net_device interfaces */
576 ATM_SKB(skb)->atm_options = vcc->atm_options; 576 ATM_SKB(skb)->atm_options = vcc->atm_options;
577 if (copy_from_user(skb_put(skb,size),buff,size)) { 577 if (copy_from_user(skb_put(skb, size), buff, size)) {
578 kfree_skb(skb); 578 kfree_skb(skb);
579 error = -EFAULT; 579 error = -EFAULT;
580 goto out; 580 goto out;
581 } 581 }
582 if (eff != size) memset(skb->data+size,0,eff-size); 582 if (eff != size)
583 error = vcc->dev->ops->send(vcc,skb); 583 memset(skb->data + size, 0, eff-size);
584 error = vcc->dev->ops->send(vcc, skb);
584 error = error ? error : size; 585 error = error ? error : size;
585out: 586out:
586 release_sock(sk); 587 release_sock(sk);
587 return error; 588 return error;
588} 589}
589 590
590
591unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait) 591unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
592{ 592{
593 struct sock *sk = sock->sk; 593 struct sock *sk = sock->sk;
@@ -623,8 +623,7 @@ unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
623 return mask; 623 return mask;
624} 624}
625 625
626 626static int atm_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
627static int atm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos)
628{ 627{
629 int error; 628 int error;
630 629
@@ -636,25 +635,31 @@ static int atm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos)
636 qos->rxtp.traffic_class != vcc->qos.rxtp.traffic_class || 635 qos->rxtp.traffic_class != vcc->qos.rxtp.traffic_class ||
637 qos->txtp.traffic_class != vcc->qos.txtp.traffic_class) 636 qos->txtp.traffic_class != vcc->qos.txtp.traffic_class)
638 return -EINVAL; 637 return -EINVAL;
639 error = adjust_tp(&qos->txtp,qos->aal); 638 error = adjust_tp(&qos->txtp, qos->aal);
640 if (!error) error = adjust_tp(&qos->rxtp,qos->aal); 639 if (!error)
641 if (error) return error; 640 error = adjust_tp(&qos->rxtp, qos->aal);
642 if (!vcc->dev->ops->change_qos) return -EOPNOTSUPP; 641 if (error)
642 return error;
643 if (!vcc->dev->ops->change_qos)
644 return -EOPNOTSUPP;
643 if (sk_atm(vcc)->sk_family == AF_ATMPVC) 645 if (sk_atm(vcc)->sk_family == AF_ATMPVC)
644 return vcc->dev->ops->change_qos(vcc,qos,ATM_MF_SET); 646 return vcc->dev->ops->change_qos(vcc, qos, ATM_MF_SET);
645 return svc_change_qos(vcc,qos); 647 return svc_change_qos(vcc, qos);
646} 648}
647 649
648
649static int check_tp(const struct atm_trafprm *tp) 650static int check_tp(const struct atm_trafprm *tp)
650{ 651{
651 /* @@@ Should be merged with adjust_tp */ 652 /* @@@ Should be merged with adjust_tp */
652 if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS) return 0; 653 if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS)
654 return 0;
653 if (tp->traffic_class != ATM_UBR && !tp->min_pcr && !tp->pcr && 655 if (tp->traffic_class != ATM_UBR && !tp->min_pcr && !tp->pcr &&
654 !tp->max_pcr) return -EINVAL; 656 !tp->max_pcr)
655 if (tp->min_pcr == ATM_MAX_PCR) return -EINVAL; 657 return -EINVAL;
658 if (tp->min_pcr == ATM_MAX_PCR)
659 return -EINVAL;
656 if (tp->min_pcr && tp->max_pcr && tp->max_pcr != ATM_MAX_PCR && 660 if (tp->min_pcr && tp->max_pcr && tp->max_pcr != ATM_MAX_PCR &&
657 tp->min_pcr > tp->max_pcr) return -EINVAL; 661 tp->min_pcr > tp->max_pcr)
662 return -EINVAL;
658 /* 663 /*
659 * We allow pcr to be outside [min_pcr,max_pcr], because later 664 * We allow pcr to be outside [min_pcr,max_pcr], because later
660 * adjustment may still push it in the valid range. 665 * adjustment may still push it in the valid range.
@@ -662,7 +667,6 @@ static int check_tp(const struct atm_trafprm *tp)
662 return 0; 667 return 0;
663} 668}
664 669
665
666static int check_qos(const struct atm_qos *qos) 670static int check_qos(const struct atm_qos *qos)
667{ 671{
668 int error; 672 int error;
@@ -672,9 +676,11 @@ static int check_qos(const struct atm_qos *qos)
672 if (qos->txtp.traffic_class != qos->rxtp.traffic_class && 676 if (qos->txtp.traffic_class != qos->rxtp.traffic_class &&
673 qos->txtp.traffic_class && qos->rxtp.traffic_class && 677 qos->txtp.traffic_class && qos->rxtp.traffic_class &&
674 qos->txtp.traffic_class != ATM_ANYCLASS && 678 qos->txtp.traffic_class != ATM_ANYCLASS &&
675 qos->rxtp.traffic_class != ATM_ANYCLASS) return -EINVAL; 679 qos->rxtp.traffic_class != ATM_ANYCLASS)
680 return -EINVAL;
676 error = check_tp(&qos->txtp); 681 error = check_tp(&qos->txtp);
677 if (error) return error; 682 if (error)
683 return error;
678 return check_tp(&qos->rxtp); 684 return check_tp(&qos->rxtp);
679} 685}
680 686
@@ -690,37 +696,41 @@ int vcc_setsockopt(struct socket *sock, int level, int optname,
690 696
691 vcc = ATM_SD(sock); 697 vcc = ATM_SD(sock);
692 switch (optname) { 698 switch (optname) {
693 case SO_ATMQOS: 699 case SO_ATMQOS:
694 { 700 {
695 struct atm_qos qos; 701 struct atm_qos qos;
696 702
697 if (copy_from_user(&qos,optval,sizeof(qos))) 703 if (copy_from_user(&qos, optval, sizeof(qos)))
698 return -EFAULT; 704 return -EFAULT;
699 error = check_qos(&qos); 705 error = check_qos(&qos);
700 if (error) return error; 706 if (error)
701 if (sock->state == SS_CONNECTED) 707 return error;
702 return atm_change_qos(vcc,&qos); 708 if (sock->state == SS_CONNECTED)
703 if (sock->state != SS_UNCONNECTED) 709 return atm_change_qos(vcc, &qos);
704 return -EBADFD; 710 if (sock->state != SS_UNCONNECTED)
705 vcc->qos = qos; 711 return -EBADFD;
706 set_bit(ATM_VF_HASQOS,&vcc->flags); 712 vcc->qos = qos;
707 return 0; 713 set_bit(ATM_VF_HASQOS, &vcc->flags);
708 } 714 return 0;
709 case SO_SETCLP:
710 if (get_user(value,(unsigned long __user *)optval))
711 return -EFAULT;
712 if (value) vcc->atm_options |= ATM_ATMOPT_CLP;
713 else vcc->atm_options &= ~ATM_ATMOPT_CLP;
714 return 0;
715 default:
716 if (level == SOL_SOCKET) return -EINVAL;
717 break;
718 } 715 }
719 if (!vcc->dev || !vcc->dev->ops->setsockopt) return -EINVAL; 716 case SO_SETCLP:
720 return vcc->dev->ops->setsockopt(vcc,level,optname,optval,optlen); 717 if (get_user(value, (unsigned long __user *)optval))
718 return -EFAULT;
719 if (value)
720 vcc->atm_options |= ATM_ATMOPT_CLP;
721 else
722 vcc->atm_options &= ~ATM_ATMOPT_CLP;
723 return 0;
724 default:
725 if (level == SOL_SOCKET)
726 return -EINVAL;
727 break;
728 }
729 if (!vcc->dev || !vcc->dev->ops->setsockopt)
730 return -EINVAL;
731 return vcc->dev->ops->setsockopt(vcc, level, optname, optval, optlen);
721} 732}
722 733
723
724int vcc_getsockopt(struct socket *sock, int level, int optname, 734int vcc_getsockopt(struct socket *sock, int level, int optname,
725 char __user *optval, int __user *optlen) 735 char __user *optval, int __user *optlen)
726{ 736{
@@ -734,33 +744,33 @@ int vcc_getsockopt(struct socket *sock, int level, int optname,
734 744
735 vcc = ATM_SD(sock); 745 vcc = ATM_SD(sock);
736 switch (optname) { 746 switch (optname) {
737 case SO_ATMQOS: 747 case SO_ATMQOS:
738 if (!test_bit(ATM_VF_HASQOS,&vcc->flags)) 748 if (!test_bit(ATM_VF_HASQOS, &vcc->flags))
739 return -EINVAL; 749 return -EINVAL;
740 return copy_to_user(optval,&vcc->qos,sizeof(vcc->qos)) ? 750 return copy_to_user(optval, &vcc->qos, sizeof(vcc->qos))
741 -EFAULT : 0; 751 ? -EFAULT : 0;
742 case SO_SETCLP: 752 case SO_SETCLP:
743 return put_user(vcc->atm_options & ATM_ATMOPT_CLP ? 1 : 753 return put_user(vcc->atm_options & ATM_ATMOPT_CLP ? 1 : 0,
744 0,(unsigned long __user *)optval) ? -EFAULT : 0; 754 (unsigned long __user *)optval) ? -EFAULT : 0;
745 case SO_ATMPVC: 755 case SO_ATMPVC:
746 { 756 {
747 struct sockaddr_atmpvc pvc; 757 struct sockaddr_atmpvc pvc;
748 758
749 if (!vcc->dev || 759 if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
750 !test_bit(ATM_VF_ADDR,&vcc->flags)) 760 return -ENOTCONN;
751 return -ENOTCONN; 761 pvc.sap_family = AF_ATMPVC;
752 pvc.sap_family = AF_ATMPVC; 762 pvc.sap_addr.itf = vcc->dev->number;
753 pvc.sap_addr.itf = vcc->dev->number; 763 pvc.sap_addr.vpi = vcc->vpi;
754 pvc.sap_addr.vpi = vcc->vpi; 764 pvc.sap_addr.vci = vcc->vci;
755 pvc.sap_addr.vci = vcc->vci; 765 return copy_to_user(optval, &pvc, sizeof(pvc)) ? -EFAULT : 0;
756 return copy_to_user(optval,&pvc,sizeof(pvc)) ? 766 }
757 -EFAULT : 0; 767 default:
758 } 768 if (level == SOL_SOCKET)
759 default: 769 return -EINVAL;
760 if (level == SOL_SOCKET) return -EINVAL;
761 break; 770 break;
762 } 771 }
763 if (!vcc->dev || !vcc->dev->ops->getsockopt) return -EINVAL; 772 if (!vcc->dev || !vcc->dev->ops->getsockopt)
773 return -EINVAL;
764 return vcc->dev->ops->getsockopt(vcc, level, optname, optval, len); 774 return vcc->dev->ops->getsockopt(vcc, level, optname, optval, len);
765} 775}
766 776
@@ -768,23 +778,27 @@ static int __init atm_init(void)
768{ 778{
769 int error; 779 int error;
770 780
771 if ((error = proto_register(&vcc_proto, 0)) < 0) 781 error = proto_register(&vcc_proto, 0);
782 if (error < 0)
772 goto out; 783 goto out;
773 784 error = atmpvc_init();
774 if ((error = atmpvc_init()) < 0) { 785 if (error < 0) {
775 printk(KERN_ERR "atmpvc_init() failed with %d\n", error); 786 pr_err("atmpvc_init() failed with %d\n", error);
776 goto out_unregister_vcc_proto; 787 goto out_unregister_vcc_proto;
777 } 788 }
778 if ((error = atmsvc_init()) < 0) { 789 error = atmsvc_init();
779 printk(KERN_ERR "atmsvc_init() failed with %d\n", error); 790 if (error < 0) {
791 pr_err("atmsvc_init() failed with %d\n", error);
780 goto out_atmpvc_exit; 792 goto out_atmpvc_exit;
781 } 793 }
782 if ((error = atm_proc_init()) < 0) { 794 error = atm_proc_init();
783 printk(KERN_ERR "atm_proc_init() failed with %d\n",error); 795 if (error < 0) {
796 pr_err("atm_proc_init() failed with %d\n", error);
784 goto out_atmsvc_exit; 797 goto out_atmsvc_exit;
785 } 798 }
786 if ((error = atm_sysfs_init()) < 0) { 799 error = atm_sysfs_init();
787 printk(KERN_ERR "atm_sysfs_init() failed with %d\n",error); 800 if (error < 0) {
801 pr_err("atm_sysfs_init() failed with %d\n", error);
788 goto out_atmproc_exit; 802 goto out_atmproc_exit;
789 } 803 }
790out: 804out:
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index 2ea40995dce..62dc8bfe6fe 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -3,6 +3,7 @@
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4/* 2003 John Levon <levon@movementarian.org> */ 4/* 2003 John Levon <levon@movementarian.org> */
5 5
6#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
6 7
7#include <linux/module.h> 8#include <linux/module.h>
8#include <linux/kmod.h> 9#include <linux/kmod.h>
@@ -36,6 +37,7 @@ void register_atm_ioctl(struct atm_ioctl *ioctl)
36 list_add_tail(&ioctl->list, &ioctl_list); 37 list_add_tail(&ioctl->list, &ioctl_list);
37 mutex_unlock(&ioctl_mutex); 38 mutex_unlock(&ioctl_mutex);
38} 39}
40EXPORT_SYMBOL(register_atm_ioctl);
39 41
40void deregister_atm_ioctl(struct atm_ioctl *ioctl) 42void deregister_atm_ioctl(struct atm_ioctl *ioctl)
41{ 43{
@@ -43,129 +45,128 @@ void deregister_atm_ioctl(struct atm_ioctl *ioctl)
43 list_del(&ioctl->list); 45 list_del(&ioctl->list);
44 mutex_unlock(&ioctl_mutex); 46 mutex_unlock(&ioctl_mutex);
45} 47}
46
47EXPORT_SYMBOL(register_atm_ioctl);
48EXPORT_SYMBOL(deregister_atm_ioctl); 48EXPORT_SYMBOL(deregister_atm_ioctl);
49 49
50static int do_vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg, int compat) 50static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
51 unsigned long arg, int compat)
51{ 52{
52 struct sock *sk = sock->sk; 53 struct sock *sk = sock->sk;
53 struct atm_vcc *vcc; 54 struct atm_vcc *vcc;
54 int error; 55 int error;
55 struct list_head * pos; 56 struct list_head *pos;
56 void __user *argp = (void __user *)arg; 57 void __user *argp = (void __user *)arg;
57 58
58 vcc = ATM_SD(sock); 59 vcc = ATM_SD(sock);
59 switch (cmd) { 60 switch (cmd) {
60 case SIOCOUTQ: 61 case SIOCOUTQ:
61 if (sock->state != SS_CONNECTED || 62 if (sock->state != SS_CONNECTED ||
62 !test_bit(ATM_VF_READY, &vcc->flags)) { 63 !test_bit(ATM_VF_READY, &vcc->flags)) {
63 error = -EINVAL; 64 error = -EINVAL;
64 goto done; 65 goto done;
65 } 66 }
66 error = put_user(sk->sk_sndbuf - sk_wmem_alloc_get(sk), 67 error = put_user(sk->sk_sndbuf - sk_wmem_alloc_get(sk),
67 (int __user *) argp) ? -EFAULT : 0; 68 (int __user *)argp) ? -EFAULT : 0;
69 goto done;
70 case SIOCINQ:
71 {
72 struct sk_buff *skb;
73
74 if (sock->state != SS_CONNECTED) {
75 error = -EINVAL;
68 goto done; 76 goto done;
69 case SIOCINQ: 77 }
70 { 78 skb = skb_peek(&sk->sk_receive_queue);
71 struct sk_buff *skb; 79 error = put_user(skb ? skb->len : 0,
72 80 (int __user *)argp) ? -EFAULT : 0;
73 if (sock->state != SS_CONNECTED) { 81 goto done;
74 error = -EINVAL; 82 }
75 goto done; 83 case SIOCGSTAMP: /* borrowed from IP */
76 }
77 skb = skb_peek(&sk->sk_receive_queue);
78 error = put_user(skb ? skb->len : 0,
79 (int __user *)argp) ? -EFAULT : 0;
80 goto done;
81 }
82 case SIOCGSTAMP: /* borrowed from IP */
83#ifdef CONFIG_COMPAT 84#ifdef CONFIG_COMPAT
84 if (compat) 85 if (compat)
85 error = compat_sock_get_timestamp(sk, argp); 86 error = compat_sock_get_timestamp(sk, argp);
86 else 87 else
87#endif 88#endif
88 error = sock_get_timestamp(sk, argp); 89 error = sock_get_timestamp(sk, argp);
89 goto done; 90 goto done;
90 case SIOCGSTAMPNS: /* borrowed from IP */ 91 case SIOCGSTAMPNS: /* borrowed from IP */
91#ifdef CONFIG_COMPAT 92#ifdef CONFIG_COMPAT
92 if (compat) 93 if (compat)
93 error = compat_sock_get_timestampns(sk, argp); 94 error = compat_sock_get_timestampns(sk, argp);
94 else 95 else
95#endif 96#endif
96 error = sock_get_timestampns(sk, argp); 97 error = sock_get_timestampns(sk, argp);
98 goto done;
99 case ATM_SETSC:
100 if (net_ratelimit())
101 pr_warning("ATM_SETSC is obsolete; used by %s:%d\n",
102 current->comm, task_pid_nr(current));
103 error = 0;
104 goto done;
105 case ATMSIGD_CTRL:
106 if (!capable(CAP_NET_ADMIN)) {
107 error = -EPERM;
97 goto done; 108 goto done;
98 case ATM_SETSC: 109 }
99 if (net_ratelimit()) 110 /*
100 printk(KERN_WARNING "ATM_SETSC is obsolete; used by %s:%d\n", 111 * The user/kernel protocol for exchanging signalling
101 current->comm, task_pid_nr(current)); 112 * info uses kernel pointers as opaque references,
102 error = 0; 113 * so the holder of the file descriptor can scribble
114 * on the kernel... so we should make sure that we
115 * have the same privileges that /proc/kcore needs
116 */
117 if (!capable(CAP_SYS_RAWIO)) {
118 error = -EPERM;
103 goto done; 119 goto done;
104 case ATMSIGD_CTRL: 120 }
105 if (!capable(CAP_NET_ADMIN)) {
106 error = -EPERM;
107 goto done;
108 }
109 /*
110 * The user/kernel protocol for exchanging signalling
111 * info uses kernel pointers as opaque references,
112 * so the holder of the file descriptor can scribble
113 * on the kernel... so we should make sure that we
114 * have the same privileges that /proc/kcore needs
115 */
116 if (!capable(CAP_SYS_RAWIO)) {
117 error = -EPERM;
118 goto done;
119 }
120#ifdef CONFIG_COMPAT 121#ifdef CONFIG_COMPAT
121 /* WTF? I don't even want to _think_ about making this 122 /* WTF? I don't even want to _think_ about making this
122 work for 32-bit userspace. TBH I don't really want 123 work for 32-bit userspace. TBH I don't really want
123 to think about it at all. dwmw2. */ 124 to think about it at all. dwmw2. */
124 if (compat) { 125 if (compat) {
125 if (net_ratelimit()) 126 if (net_ratelimit())
126 printk(KERN_WARNING "32-bit task cannot be atmsigd\n"); 127 pr_warning("32-bit task cannot be atmsigd\n");
127 error = -EINVAL; 128 error = -EINVAL;
128 goto done; 129 goto done;
129 } 130 }
130#endif 131#endif
131 error = sigd_attach(vcc); 132 error = sigd_attach(vcc);
132 if (!error) 133 if (!error)
133 sock->state = SS_CONNECTED; 134 sock->state = SS_CONNECTED;
135 goto done;
136 case ATM_SETBACKEND:
137 case ATM_NEWBACKENDIF:
138 {
139 atm_backend_t backend;
140 error = get_user(backend, (atm_backend_t __user *)argp);
141 if (error)
134 goto done; 142 goto done;
135 case ATM_SETBACKEND: 143 switch (backend) {
136 case ATM_NEWBACKENDIF: 144 case ATM_BACKEND_PPP:
137 { 145 request_module("pppoatm");
138 atm_backend_t backend;
139 error = get_user(backend, (atm_backend_t __user *) argp);
140 if (error)
141 goto done;
142 switch (backend) {
143 case ATM_BACKEND_PPP:
144 request_module("pppoatm");
145 break;
146 case ATM_BACKEND_BR2684:
147 request_module("br2684");
148 break;
149 }
150 }
151 break;
152 case ATMMPC_CTRL:
153 case ATMMPC_DATA:
154 request_module("mpoa");
155 break;
156 case ATMARPD_CTRL:
157 request_module("clip");
158 break; 146 break;
159 case ATMLEC_CTRL: 147 case ATM_BACKEND_BR2684:
160 request_module("lec"); 148 request_module("br2684");
161 break; 149 break;
150 }
151 break;
152 }
153 case ATMMPC_CTRL:
154 case ATMMPC_DATA:
155 request_module("mpoa");
156 break;
157 case ATMARPD_CTRL:
158 request_module("clip");
159 break;
160 case ATMLEC_CTRL:
161 request_module("lec");
162 break;
162 } 163 }
163 164
164 error = -ENOIOCTLCMD; 165 error = -ENOIOCTLCMD;
165 166
166 mutex_lock(&ioctl_mutex); 167 mutex_lock(&ioctl_mutex);
167 list_for_each(pos, &ioctl_list) { 168 list_for_each(pos, &ioctl_list) {
168 struct atm_ioctl * ic = list_entry(pos, struct atm_ioctl, list); 169 struct atm_ioctl *ic = list_entry(pos, struct atm_ioctl, list);
169 if (try_module_get(ic->owner)) { 170 if (try_module_get(ic->owner)) {
170 error = ic->ioctl(sock, cmd, arg); 171 error = ic->ioctl(sock, cmd, arg);
171 module_put(ic->owner); 172 module_put(ic->owner);
@@ -184,7 +185,6 @@ done:
184 return error; 185 return error;
185} 186}
186 187
187
188int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 188int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
189{ 189{
190 return do_vcc_ioctl(sock, cmd, arg, 0); 190 return do_vcc_ioctl(sock, cmd, arg, 0);
@@ -287,8 +287,8 @@ static int do_atmif_sioc(struct socket *sock, unsigned int cmd,
287 sioc = compat_alloc_user_space(sizeof(*sioc)); 287 sioc = compat_alloc_user_space(sizeof(*sioc));
288 sioc32 = compat_ptr(arg); 288 sioc32 = compat_ptr(arg);
289 289
290 if (copy_in_user(&sioc->number, &sioc32->number, 2 * sizeof(int)) 290 if (copy_in_user(&sioc->number, &sioc32->number, 2 * sizeof(int)) ||
291 || get_user(data, &sioc32->arg)) 291 get_user(data, &sioc32->arg))
292 return -EFAULT; 292 return -EFAULT;
293 datap = compat_ptr(data); 293 datap = compat_ptr(data);
294 if (put_user(datap, &sioc->arg)) 294 if (put_user(datap, &sioc->arg))
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 42749b7b917..5da5753157f 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -4,6 +4,8 @@
4 * Marko Kiiskila <mkiiskila@yahoo.com> 4 * Marko Kiiskila <mkiiskila@yahoo.com>
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
8
7#include <linux/kernel.h> 9#include <linux/kernel.h>
8#include <linux/bitops.h> 10#include <linux/bitops.h>
9#include <linux/capability.h> 11#include <linux/capability.h>
@@ -16,7 +18,7 @@
16#include <linux/skbuff.h> 18#include <linux/skbuff.h>
17#include <linux/ip.h> 19#include <linux/ip.h>
18#include <asm/byteorder.h> 20#include <asm/byteorder.h>
19#include <asm/uaccess.h> 21#include <linux/uaccess.h>
20#include <net/arp.h> 22#include <net/arp.h>
21#include <net/dst.h> 23#include <net/dst.h>
22#include <linux/proc_fs.h> 24#include <linux/proc_fs.h>
@@ -85,17 +87,19 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
85 int is_rdesc, 87 int is_rdesc,
86 struct lec_arp_table **ret_entry); 88 struct lec_arp_table **ret_entry);
87static void lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr, 89static void lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
88 const unsigned char *atm_addr, unsigned long remoteflag, 90 const unsigned char *atm_addr,
91 unsigned long remoteflag,
89 unsigned int targetless_le_arp); 92 unsigned int targetless_le_arp);
90static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id); 93static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id);
91static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc); 94static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc);
92static void lec_set_flush_tran_id(struct lec_priv *priv, 95static void lec_set_flush_tran_id(struct lec_priv *priv,
93 const unsigned char *atm_addr, 96 const unsigned char *atm_addr,
94 unsigned long tran_id); 97 unsigned long tran_id);
95static void lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data, 98static void lec_vcc_added(struct lec_priv *priv,
99 const struct atmlec_ioc *ioc_data,
96 struct atm_vcc *vcc, 100 struct atm_vcc *vcc,
97 void (*old_push) (struct atm_vcc *vcc, 101 void (*old_push)(struct atm_vcc *vcc,
98 struct sk_buff *skb)); 102 struct sk_buff *skb));
99static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc); 103static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc);
100 104
101/* must be done under lec_arp_lock */ 105/* must be done under lec_arp_lock */
@@ -110,7 +114,6 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
110 kfree(entry); 114 kfree(entry);
111} 115}
112 116
113
114static struct lane2_ops lane2_ops = { 117static struct lane2_ops lane2_ops = {
115 lane2_resolve, /* resolve, spec 3.1.3 */ 118 lane2_resolve, /* resolve, spec 3.1.3 */
116 lane2_associate_req, /* associate_req, spec 3.1.4 */ 119 lane2_associate_req, /* associate_req, spec 3.1.4 */
@@ -148,7 +151,8 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
148 mesg = (struct atmlec_msg *)skb2->data; 151 mesg = (struct atmlec_msg *)skb2->data;
149 mesg->type = l_topology_change; 152 mesg->type = l_topology_change;
150 buff += 4; 153 buff += 4;
151 mesg->content.normal.flag = *buff & 0x01; /* 0x01 is topology change */ 154 mesg->content.normal.flag = *buff & 0x01;
155 /* 0x01 is topology change */
152 156
153 priv = netdev_priv(dev); 157 priv = netdev_priv(dev);
154 atm_force_charge(priv->lecd, skb2->truesize); 158 atm_force_charge(priv->lecd, skb2->truesize);
@@ -242,7 +246,7 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
242 246
243static void lec_tx_timeout(struct net_device *dev) 247static void lec_tx_timeout(struct net_device *dev)
244{ 248{
245 printk(KERN_INFO "%s: tx timeout\n", dev->name); 249 pr_info("%s\n", dev->name);
246 dev->trans_start = jiffies; 250 dev->trans_start = jiffies;
247 netif_wake_queue(dev); 251 netif_wake_queue(dev);
248} 252}
@@ -261,14 +265,10 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
261 unsigned char rdesc[ETH_ALEN]; /* Token Ring route descriptor */ 265 unsigned char rdesc[ETH_ALEN]; /* Token Ring route descriptor */
262#endif 266#endif
263 int is_rdesc; 267 int is_rdesc;
264#if DUMP_PACKETS > 0
265 char buf[300];
266 int i = 0;
267#endif /* DUMP_PACKETS >0 */
268 268
269 pr_debug("lec_start_xmit called\n"); 269 pr_debug("called\n");
270 if (!priv->lecd) { 270 if (!priv->lecd) {
271 printk("%s:No lecd attached\n", dev->name); 271 pr_info("%s:No lecd attached\n", dev->name);
272 dev->stats.tx_errors++; 272 dev->stats.tx_errors++;
273 netif_stop_queue(dev); 273 netif_stop_queue(dev);
274 kfree_skb(skb); 274 kfree_skb(skb);
@@ -276,8 +276,8 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
276 } 276 }
277 277
278 pr_debug("skbuff head:%lx data:%lx tail:%lx end:%lx\n", 278 pr_debug("skbuff head:%lx data:%lx tail:%lx end:%lx\n",
279 (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb), 279 (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb),
280 (long)skb_end_pointer(skb)); 280 (long)skb_end_pointer(skb));
281#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) 281#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
282 if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0) 282 if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0)
283 lec_handle_bridge(skb, dev); 283 lec_handle_bridge(skb, dev);
@@ -285,8 +285,7 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
285 285
286 /* Make sure we have room for lec_id */ 286 /* Make sure we have room for lec_id */
287 if (skb_headroom(skb) < 2) { 287 if (skb_headroom(skb) < 2) {
288 288 pr_debug("reallocating skb\n");
289 pr_debug("lec_start_xmit: reallocating skb\n");
290 skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN); 289 skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
291 kfree_skb(skb); 290 kfree_skb(skb);
292 if (skb2 == NULL) 291 if (skb2 == NULL)
@@ -313,23 +312,17 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
313 } 312 }
314#endif 313#endif
315 314
316#if DUMP_PACKETS > 0
317 printk("%s: send datalen:%ld lecid:%4.4x\n", dev->name,
318 skb->len, priv->lecid);
319#if DUMP_PACKETS >= 2 315#if DUMP_PACKETS >= 2
320 for (i = 0; i < skb->len && i < 99; i++) { 316#define MAX_DUMP_SKB 99
321 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
322 }
323#elif DUMP_PACKETS >= 1 317#elif DUMP_PACKETS >= 1
324 for (i = 0; i < skb->len && i < 30; i++) { 318#define MAX_DUMP_SKB 30
325 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]); 319#endif
326 } 320#if DUMP_PACKETS >= 1
321 printk(KERN_DEBUG "%s: send datalen:%ld lecid:%4.4x\n",
322 dev->name, skb->len, priv->lecid);
323 print_hex_dump(KERN_DEBUG, "", DUMP_OFFSET, 16, 1,
324 skb->data, min(skb->len, MAX_DUMP_SKB), true);
327#endif /* DUMP_PACKETS >= 1 */ 325#endif /* DUMP_PACKETS >= 1 */
328 if (i == skb->len)
329 printk("%s\n", buf);
330 else
331 printk("%s...\n", buf);
332#endif /* DUMP_PACKETS > 0 */
333 326
334 /* Minimum ethernet-frame size */ 327 /* Minimum ethernet-frame size */
335#ifdef CONFIG_TR 328#ifdef CONFIG_TR
@@ -367,31 +360,28 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
367#endif 360#endif
368 entry = NULL; 361 entry = NULL;
369 vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry); 362 vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry);
370 pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n", dev->name, 363 pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n",
371 vcc, vcc ? vcc->flags : 0, entry); 364 dev->name, vcc, vcc ? vcc->flags : 0, entry);
372 if (!vcc || !test_bit(ATM_VF_READY, &vcc->flags)) { 365 if (!vcc || !test_bit(ATM_VF_READY, &vcc->flags)) {
373 if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) { 366 if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) {
374 pr_debug("%s:lec_start_xmit: queuing packet, ", 367 pr_debug("%s:queuing packet, MAC address %pM\n",
375 dev->name); 368 dev->name, lec_h->h_dest);
376 pr_debug("MAC address %pM\n", lec_h->h_dest);
377 skb_queue_tail(&entry->tx_wait, skb); 369 skb_queue_tail(&entry->tx_wait, skb);
378 } else { 370 } else {
379 pr_debug 371 pr_debug("%s:tx queue full or no arp entry, dropping, MAC address: %pM\n",
380 ("%s:lec_start_xmit: tx queue full or no arp entry, dropping, ", 372 dev->name, lec_h->h_dest);
381 dev->name);
382 pr_debug("MAC address %pM\n", lec_h->h_dest);
383 dev->stats.tx_dropped++; 373 dev->stats.tx_dropped++;
384 dev_kfree_skb(skb); 374 dev_kfree_skb(skb);
385 } 375 }
386 goto out; 376 goto out;
387 } 377 }
388#if DUMP_PACKETS > 0 378#if DUMP_PACKETS > 0
389 printk("%s:sending to vpi:%d vci:%d\n", dev->name, vcc->vpi, vcc->vci); 379 printk(KERN_DEBUG "%s:sending to vpi:%d vci:%d\n",
380 dev->name, vcc->vpi, vcc->vci);
390#endif /* DUMP_PACKETS > 0 */ 381#endif /* DUMP_PACKETS > 0 */
391 382
392 while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) { 383 while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) {
393 pr_debug("lec.c: emptying tx queue, "); 384 pr_debug("emptying tx queue, MAC address %pM\n", lec_h->h_dest);
394 pr_debug("MAC address %pM\n", lec_h->h_dest);
395 lec_send(vcc, skb2); 385 lec_send(vcc, skb2);
396 } 386 }
397 387
@@ -444,14 +434,12 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
444 pr_debug("%s: msg from zeppelin:%d\n", dev->name, mesg->type); 434 pr_debug("%s: msg from zeppelin:%d\n", dev->name, mesg->type);
445 switch (mesg->type) { 435 switch (mesg->type) {
446 case l_set_mac_addr: 436 case l_set_mac_addr:
447 for (i = 0; i < 6; i++) { 437 for (i = 0; i < 6; i++)
448 dev->dev_addr[i] = mesg->content.normal.mac_addr[i]; 438 dev->dev_addr[i] = mesg->content.normal.mac_addr[i];
449 }
450 break; 439 break;
451 case l_del_mac_addr: 440 case l_del_mac_addr:
452 for (i = 0; i < 6; i++) { 441 for (i = 0; i < 6; i++)
453 dev->dev_addr[i] = 0; 442 dev->dev_addr[i] = 0;
454 }
455 break; 443 break;
456 case l_addr_delete: 444 case l_addr_delete:
457 lec_addr_delete(priv, mesg->content.normal.atm_addr, 445 lec_addr_delete(priv, mesg->content.normal.atm_addr,
@@ -477,10 +465,10 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
477 mesg->content.normal.atm_addr, 465 mesg->content.normal.atm_addr,
478 mesg->content.normal.flag, 466 mesg->content.normal.flag,
479 mesg->content.normal.targetless_le_arp); 467 mesg->content.normal.targetless_le_arp);
480 pr_debug("lec: in l_arp_update\n"); 468 pr_debug("in l_arp_update\n");
481 if (mesg->sizeoftlvs != 0) { /* LANE2 3.1.5 */ 469 if (mesg->sizeoftlvs != 0) { /* LANE2 3.1.5 */
482 pr_debug("lec: LANE2 3.1.5, got tlvs, size %d\n", 470 pr_debug("LANE2 3.1.5, got tlvs, size %d\n",
483 mesg->sizeoftlvs); 471 mesg->sizeoftlvs);
484 lane2_associate_ind(dev, mesg->content.normal.mac_addr, 472 lane2_associate_ind(dev, mesg->content.normal.mac_addr,
485 tmp, mesg->sizeoftlvs); 473 tmp, mesg->sizeoftlvs);
486 } 474 }
@@ -499,13 +487,14 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
499 priv->flush_timeout = (mesg->content.config.flush_timeout * HZ); 487 priv->flush_timeout = (mesg->content.config.flush_timeout * HZ);
500 priv->path_switching_delay = 488 priv->path_switching_delay =
501 (mesg->content.config.path_switching_delay * HZ); 489 (mesg->content.config.path_switching_delay * HZ);
502 priv->lane_version = mesg->content.config.lane_version; /* LANE2 */ 490 priv->lane_version = mesg->content.config.lane_version;
491 /* LANE2 */
503 priv->lane2_ops = NULL; 492 priv->lane2_ops = NULL;
504 if (priv->lane_version > 1) 493 if (priv->lane_version > 1)
505 priv->lane2_ops = &lane2_ops; 494 priv->lane2_ops = &lane2_ops;
506 if (dev_set_mtu(dev, mesg->content.config.mtu)) 495 if (dev_set_mtu(dev, mesg->content.config.mtu))
507 printk("%s: change_mtu to %d failed\n", dev->name, 496 pr_info("%s: change_mtu to %d failed\n",
508 mesg->content.config.mtu); 497 dev->name, mesg->content.config.mtu);
509 priv->is_proxy = mesg->content.config.is_proxy; 498 priv->is_proxy = mesg->content.config.is_proxy;
510 break; 499 break;
511 case l_flush_tran_id: 500 case l_flush_tran_id:
@@ -518,40 +507,35 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
518 break; 507 break;
519 case l_should_bridge: 508 case l_should_bridge:
520#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) 509#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
521 { 510 {
522 pr_debug("%s: bridge zeppelin asks about %pM\n", 511 pr_debug("%s: bridge zeppelin asks about %pM\n",
523 dev->name, mesg->content.proxy.mac_addr); 512 dev->name, mesg->content.proxy.mac_addr);
524 513
525 if (br_fdb_test_addr_hook == NULL) 514 if (br_fdb_test_addr_hook == NULL)
526 break; 515 break;
527 516
528 if (br_fdb_test_addr_hook(dev, 517 if (br_fdb_test_addr_hook(dev, mesg->content.proxy.mac_addr)) {
529 mesg->content.proxy.mac_addr)) { 518 /* hit from bridge table, send LE_ARP_RESPONSE */
530 /* hit from bridge table, send LE_ARP_RESPONSE */ 519 struct sk_buff *skb2;
531 struct sk_buff *skb2; 520 struct sock *sk;
532 struct sock *sk; 521
533 522 pr_debug("%s: entry found, responding to zeppelin\n",
534 pr_debug 523 dev->name);
535 ("%s: entry found, responding to zeppelin\n", 524 skb2 = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
536 dev->name); 525 if (skb2 == NULL)
537 skb2 = 526 break;
538 alloc_skb(sizeof(struct atmlec_msg), 527 skb2->len = sizeof(struct atmlec_msg);
539 GFP_ATOMIC); 528 skb_copy_to_linear_data(skb2, mesg, sizeof(*mesg));
540 if (skb2 == NULL) 529 atm_force_charge(priv->lecd, skb2->truesize);
541 break; 530 sk = sk_atm(priv->lecd);
542 skb2->len = sizeof(struct atmlec_msg); 531 skb_queue_tail(&sk->sk_receive_queue, skb2);
543 skb_copy_to_linear_data(skb2, mesg, 532 sk->sk_data_ready(sk, skb2->len);
544 sizeof(*mesg));
545 atm_force_charge(priv->lecd, skb2->truesize);
546 sk = sk_atm(priv->lecd);
547 skb_queue_tail(&sk->sk_receive_queue, skb2);
548 sk->sk_data_ready(sk, skb2->len);
549 }
550 } 533 }
534 }
551#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */ 535#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
552 break; 536 break;
553 default: 537 default:
554 printk("%s: Unknown message type %d\n", dev->name, mesg->type); 538 pr_info("%s: Unknown message type %d\n", dev->name, mesg->type);
555 dev_kfree_skb(skb); 539 dev_kfree_skb(skb);
556 return -EINVAL; 540 return -EINVAL;
557 } 541 }
@@ -572,14 +556,13 @@ static void lec_atm_close(struct atm_vcc *vcc)
572 lec_arp_destroy(priv); 556 lec_arp_destroy(priv);
573 557
574 if (skb_peek(&sk_atm(vcc)->sk_receive_queue)) 558 if (skb_peek(&sk_atm(vcc)->sk_receive_queue))
575 printk("%s lec_atm_close: closing with messages pending\n", 559 pr_info("%s closing with messages pending\n", dev->name);
576 dev->name); 560 while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue))) {
577 while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue)) != NULL) {
578 atm_return(vcc, skb->truesize); 561 atm_return(vcc, skb->truesize);
579 dev_kfree_skb(skb); 562 dev_kfree_skb(skb);
580 } 563 }
581 564
582 printk("%s: Shut down!\n", dev->name); 565 pr_info("%s: Shut down!\n", dev->name);
583 module_put(THIS_MODULE); 566 module_put(THIS_MODULE);
584} 567}
585 568
@@ -608,9 +591,8 @@ send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
608 struct sk_buff *skb; 591 struct sk_buff *skb;
609 struct atmlec_msg *mesg; 592 struct atmlec_msg *mesg;
610 593
611 if (!priv || !priv->lecd) { 594 if (!priv || !priv->lecd)
612 return -1; 595 return -1;
613 }
614 skb = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC); 596 skb = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
615 if (!skb) 597 if (!skb)
616 return -1; 598 return -1;
@@ -633,7 +615,7 @@ send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
633 sk->sk_data_ready(sk, skb->len); 615 sk->sk_data_ready(sk, skb->len);
634 616
635 if (data != NULL) { 617 if (data != NULL) {
636 pr_debug("lec: about to send %d bytes of data\n", data->len); 618 pr_debug("about to send %d bytes of data\n", data->len);
637 atm_force_charge(priv->lecd, data->truesize); 619 atm_force_charge(priv->lecd, data->truesize);
638 skb_queue_tail(&sk->sk_receive_queue, data); 620 skb_queue_tail(&sk->sk_receive_queue, data);
639 sk->sk_data_ready(sk, skb->len); 621 sk->sk_data_ready(sk, skb->len);
@@ -691,36 +673,28 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
691 struct net_device *dev = (struct net_device *)vcc->proto_data; 673 struct net_device *dev = (struct net_device *)vcc->proto_data;
692 struct lec_priv *priv = netdev_priv(dev); 674 struct lec_priv *priv = netdev_priv(dev);
693 675
694#if DUMP_PACKETS >0 676#if DUMP_PACKETS > 0
695 int i = 0; 677 printk(KERN_DEBUG "%s: vcc vpi:%d vci:%d\n",
696 char buf[300]; 678 dev->name, vcc->vpi, vcc->vci);
697
698 printk("%s: lec_push vcc vpi:%d vci:%d\n", dev->name,
699 vcc->vpi, vcc->vci);
700#endif 679#endif
701 if (!skb) { 680 if (!skb) {
702 pr_debug("%s: null skb\n", dev->name); 681 pr_debug("%s: null skb\n", dev->name);
703 lec_vcc_close(priv, vcc); 682 lec_vcc_close(priv, vcc);
704 return; 683 return;
705 } 684 }
706#if DUMP_PACKETS > 0
707 printk("%s: rcv datalen:%ld lecid:%4.4x\n", dev->name,
708 skb->len, priv->lecid);
709#if DUMP_PACKETS >= 2 685#if DUMP_PACKETS >= 2
710 for (i = 0; i < skb->len && i < 99; i++) { 686#define MAX_SKB_DUMP 99
711 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
712 }
713#elif DUMP_PACKETS >= 1 687#elif DUMP_PACKETS >= 1
714 for (i = 0; i < skb->len && i < 30; i++) { 688#define MAX_SKB_DUMP 30
715 sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]); 689#endif
716 } 690#if DUMP_PACKETS > 0
717#endif /* DUMP_PACKETS >= 1 */ 691 printk(KERN_DEBUG "%s: rcv datalen:%ld lecid:%4.4x\n",
718 if (i == skb->len) 692 dev->name, skb->len, priv->lecid);
719 printk("%s\n", buf); 693 print_hex_dump(KERN_DEBUG, "", DUMP_OFFSET, 16, 1,
720 else 694 skb->data, min(MAX_SKB_DUMP, skb->len), true);
721 printk("%s...\n", buf);
722#endif /* DUMP_PACKETS > 0 */ 695#endif /* DUMP_PACKETS > 0 */
723 if (memcmp(skb->data, lec_ctrl_magic, 4) == 0) { /* Control frame, to daemon */ 696 if (memcmp(skb->data, lec_ctrl_magic, 4) == 0) {
697 /* Control frame, to daemon */
724 struct sock *sk = sk_atm(vcc); 698 struct sock *sk = sk_atm(vcc);
725 699
726 pr_debug("%s: To daemon\n", dev->name); 700 pr_debug("%s: To daemon\n", dev->name);
@@ -778,9 +752,8 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
778 dev_kfree_skb(skb); 752 dev_kfree_skb(skb);
779 return; 753 return;
780 } 754 }
781 if (!hlist_empty(&priv->lec_arp_empty_ones)) { 755 if (!hlist_empty(&priv->lec_arp_empty_ones))
782 lec_arp_check_empties(priv, vcc, skb); 756 lec_arp_check_empties(priv, vcc, skb);
783 }
784 skb_pull(skb, 2); /* skip lec_id */ 757 skb_pull(skb, 2); /* skip lec_id */
785#ifdef CONFIG_TR 758#ifdef CONFIG_TR
786 if (priv->is_trdev) 759 if (priv->is_trdev)
@@ -801,7 +774,7 @@ static void lec_pop(struct atm_vcc *vcc, struct sk_buff *skb)
801 struct net_device *dev = skb->dev; 774 struct net_device *dev = skb->dev;
802 775
803 if (vpriv == NULL) { 776 if (vpriv == NULL) {
804 printk("lec_pop(): vpriv = NULL!?!?!?\n"); 777 pr_info("vpriv = NULL!?!?!?\n");
805 return; 778 return;
806 } 779 }
807 780
@@ -822,15 +795,13 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
822 795
823 /* Lecd must be up in this case */ 796 /* Lecd must be up in this case */
824 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc)); 797 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
825 if (bytes_left != 0) { 798 if (bytes_left != 0)
826 printk 799 pr_info("copy from user failed for %d bytes\n", bytes_left);
827 ("lec: lec_vcc_attach, copy from user failed for %d bytes\n",
828 bytes_left);
829 }
830 if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF || 800 if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF ||
831 !dev_lec[ioc_data.dev_num]) 801 !dev_lec[ioc_data.dev_num])
832 return -EINVAL; 802 return -EINVAL;
833 if (!(vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL))) 803 vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
804 if (!vpriv)
834 return -ENOMEM; 805 return -ENOMEM;
835 vpriv->xoff = 0; 806 vpriv->xoff = 0;
836 vpriv->old_pop = vcc->pop; 807 vpriv->old_pop = vcc->pop;
@@ -921,9 +892,8 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
921 priv->flush_timeout = (4 * HZ); 892 priv->flush_timeout = (4 * HZ);
922 priv->path_switching_delay = (6 * HZ); 893 priv->path_switching_delay = (6 * HZ);
923 894
924 if (dev_lec[i]->flags & IFF_UP) { 895 if (dev_lec[i]->flags & IFF_UP)
925 netif_start_queue(dev_lec[i]); 896 netif_start_queue(dev_lec[i]);
926 }
927 __module_get(THIS_MODULE); 897 __module_get(THIS_MODULE);
928 return i; 898 return i;
929} 899}
@@ -1125,7 +1095,9 @@ static int lec_seq_show(struct seq_file *seq, void *v)
1125 else { 1095 else {
1126 struct lec_state *state = seq->private; 1096 struct lec_state *state = seq->private;
1127 struct net_device *dev = state->dev; 1097 struct net_device *dev = state->dev;
1128 struct lec_arp_table *entry = hlist_entry(state->node, struct lec_arp_table, next); 1098 struct lec_arp_table *entry = hlist_entry(state->node,
1099 struct lec_arp_table,
1100 next);
1129 1101
1130 seq_printf(seq, "%s ", dev->name); 1102 seq_printf(seq, "%s ", dev->name);
1131 lec_info(seq, entry); 1103 lec_info(seq, entry);
@@ -1199,13 +1171,13 @@ static int __init lane_module_init(void)
1199 1171
1200 p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops); 1172 p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops);
1201 if (!p) { 1173 if (!p) {
1202 printk(KERN_ERR "Unable to initialize /proc/net/atm/lec\n"); 1174 pr_err("Unable to initialize /proc/net/atm/lec\n");
1203 return -ENOMEM; 1175 return -ENOMEM;
1204 } 1176 }
1205#endif 1177#endif
1206 1178
1207 register_atm_ioctl(&lane_ioctl_ops); 1179 register_atm_ioctl(&lane_ioctl_ops);
1208 printk("lec.c: " __DATE__ " " __TIME__ " initialized\n"); 1180 pr_info("lec.c: " __DATE__ " " __TIME__ " initialized\n");
1209 return 0; 1181 return 0;
1210} 1182}
1211 1183
@@ -1294,13 +1266,13 @@ static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
1294 struct lec_priv *priv = netdev_priv(dev); 1266 struct lec_priv *priv = netdev_priv(dev);
1295 1267
1296 if (compare_ether_addr(lan_dst, dev->dev_addr)) 1268 if (compare_ether_addr(lan_dst, dev->dev_addr))
1297 return (0); /* not our mac address */ 1269 return 0; /* not our mac address */
1298 1270
1299 kfree(priv->tlvs); /* NULL if there was no previous association */ 1271 kfree(priv->tlvs); /* NULL if there was no previous association */
1300 1272
1301 priv->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL); 1273 priv->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL);
1302 if (priv->tlvs == NULL) 1274 if (priv->tlvs == NULL)
1303 return (0); 1275 return 0;
1304 priv->sizeoftlvs = sizeoftlvs; 1276 priv->sizeoftlvs = sizeoftlvs;
1305 1277
1306 skb = alloc_skb(sizeoftlvs, GFP_ATOMIC); 1278 skb = alloc_skb(sizeoftlvs, GFP_ATOMIC);
@@ -1310,12 +1282,12 @@ static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
1310 skb_copy_to_linear_data(skb, tlvs, sizeoftlvs); 1282 skb_copy_to_linear_data(skb, tlvs, sizeoftlvs);
1311 retval = send_to_lecd(priv, l_associate_req, NULL, NULL, skb); 1283 retval = send_to_lecd(priv, l_associate_req, NULL, NULL, skb);
1312 if (retval != 0) 1284 if (retval != 0)
1313 printk("lec.c: lane2_associate_req() failed\n"); 1285 pr_info("lec.c: lane2_associate_req() failed\n");
1314 /* 1286 /*
1315 * If the previous association has changed we must 1287 * If the previous association has changed we must
1316 * somehow notify other LANE entities about the change 1288 * somehow notify other LANE entities about the change
1317 */ 1289 */
1318 return (1); 1290 return 1;
1319} 1291}
1320 1292
1321/* 1293/*
@@ -1348,12 +1320,12 @@ static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr,
1348 entry->sizeoftlvs = sizeoftlvs; 1320 entry->sizeoftlvs = sizeoftlvs;
1349#endif 1321#endif
1350#if 0 1322#if 0
1351 printk("lec.c: lane2_associate_ind()\n"); 1323 pr_info("\n");
1352 printk("dump of tlvs, sizeoftlvs=%d\n", sizeoftlvs); 1324 pr_info("dump of tlvs, sizeoftlvs=%d\n", sizeoftlvs);
1353 while (i < sizeoftlvs) 1325 while (i < sizeoftlvs)
1354 printk("%02x ", tlvs[i++]); 1326 pr_cont("%02x ", tlvs[i++]);
1355 1327
1356 printk("\n"); 1328 pr_cont("\n");
1357#endif 1329#endif
1358 1330
1359 /* tell MPOA about the TLVs we saw */ 1331 /* tell MPOA about the TLVs we saw */
@@ -1373,15 +1345,15 @@ static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr,
1373 1345
1374#include <linux/types.h> 1346#include <linux/types.h>
1375#include <linux/timer.h> 1347#include <linux/timer.h>
1376#include <asm/param.h> 1348#include <linux/param.h>
1377#include <asm/atomic.h> 1349#include <asm/atomic.h>
1378#include <linux/inetdevice.h> 1350#include <linux/inetdevice.h>
1379#include <net/route.h> 1351#include <net/route.h>
1380 1352
1381#if 0 1353#if 0
1382#define pr_debug(format,args...) 1354#define pr_debug(format, args...)
1383/* 1355/*
1384#define pr_debug printk 1356 #define pr_debug printk
1385*/ 1357*/
1386#endif 1358#endif
1387#define DEBUG_ARP_TABLE 0 1359#define DEBUG_ARP_TABLE 0
@@ -1395,7 +1367,7 @@ static void lec_arp_expire_arp(unsigned long data);
1395 * Arp table funcs 1367 * Arp table funcs
1396 */ 1368 */
1397 1369
1398#define HASH(ch) (ch & (LEC_ARP_TABLE_SIZE -1)) 1370#define HASH(ch) (ch & (LEC_ARP_TABLE_SIZE - 1))
1399 1371
1400/* 1372/*
1401 * Initialization of arp-cache 1373 * Initialization of arp-cache
@@ -1404,9 +1376,8 @@ static void lec_arp_init(struct lec_priv *priv)
1404{ 1376{
1405 unsigned short i; 1377 unsigned short i;
1406 1378
1407 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1379 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
1408 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]); 1380 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
1409 }
1410 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones); 1381 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
1411 INIT_HLIST_HEAD(&priv->lec_no_forward); 1382 INIT_HLIST_HEAD(&priv->lec_no_forward);
1412 INIT_HLIST_HEAD(&priv->mcast_fwds); 1383 INIT_HLIST_HEAD(&priv->mcast_fwds);
@@ -1450,10 +1421,7 @@ lec_arp_add(struct lec_priv *priv, struct lec_arp_table *entry)
1450 tmp = &priv->lec_arp_tables[HASH(entry->mac_addr[ETH_ALEN - 1])]; 1421 tmp = &priv->lec_arp_tables[HASH(entry->mac_addr[ETH_ALEN - 1])];
1451 hlist_add_head(&entry->next, tmp); 1422 hlist_add_head(&entry->next, tmp);
1452 1423
1453 pr_debug("LEC_ARP: Added entry:%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", 1424 pr_debug("Added entry:%pM\n", entry->mac_addr);
1454 0xff & entry->mac_addr[0], 0xff & entry->mac_addr[1],
1455 0xff & entry->mac_addr[2], 0xff & entry->mac_addr[3],
1456 0xff & entry->mac_addr[4], 0xff & entry->mac_addr[5]);
1457} 1425}
1458 1426
1459/* 1427/*
@@ -1466,20 +1434,23 @@ lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
1466 struct lec_arp_table *entry; 1434 struct lec_arp_table *entry;
1467 int i, remove_vcc = 1; 1435 int i, remove_vcc = 1;
1468 1436
1469 if (!to_remove) { 1437 if (!to_remove)
1470 return -1; 1438 return -1;
1471 }
1472 1439
1473 hlist_del(&to_remove->next); 1440 hlist_del(&to_remove->next);
1474 del_timer(&to_remove->timer); 1441 del_timer(&to_remove->timer);
1475 1442
1476 /* If this is the only MAC connected to this VCC, also tear down the VCC */ 1443 /*
1444 * If this is the only MAC connected to this VCC,
1445 * also tear down the VCC
1446 */
1477 if (to_remove->status >= ESI_FLUSH_PENDING) { 1447 if (to_remove->status >= ESI_FLUSH_PENDING) {
1478 /* 1448 /*
1479 * ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT 1449 * ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT
1480 */ 1450 */
1481 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1451 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1482 hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { 1452 hlist_for_each_entry(entry, node,
1453 &priv->lec_arp_tables[i], next) {
1483 if (memcmp(to_remove->atm_addr, 1454 if (memcmp(to_remove->atm_addr,
1484 entry->atm_addr, ATM_ESA_LEN) == 0) { 1455 entry->atm_addr, ATM_ESA_LEN) == 0) {
1485 remove_vcc = 0; 1456 remove_vcc = 0;
@@ -1492,10 +1463,7 @@ lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
1492 } 1463 }
1493 skb_queue_purge(&to_remove->tx_wait); /* FIXME: good place for this? */ 1464 skb_queue_purge(&to_remove->tx_wait); /* FIXME: good place for this? */
1494 1465
1495 pr_debug("LEC_ARP: Removed entry:%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", 1466 pr_debug("Removed entry:%pM\n", to_remove->mac_addr);
1496 0xff & to_remove->mac_addr[0], 0xff & to_remove->mac_addr[1],
1497 0xff & to_remove->mac_addr[2], 0xff & to_remove->mac_addr[3],
1498 0xff & to_remove->mac_addr[4], 0xff & to_remove->mac_addr[5]);
1499 return 0; 1467 return 0;
1500} 1468}
1501 1469
@@ -1513,9 +1481,8 @@ static const char *get_status_string(unsigned char st)
1513 return "ESI_FLUSH_PENDING"; 1481 return "ESI_FLUSH_PENDING";
1514 case ESI_FORWARD_DIRECT: 1482 case ESI_FORWARD_DIRECT:
1515 return "ESI_FORWARD_DIRECT"; 1483 return "ESI_FORWARD_DIRECT";
1516 default:
1517 return "<UNKNOWN>";
1518 } 1484 }
1485 return "<UNKNOWN>";
1519} 1486}
1520 1487
1521static void dump_arp_table(struct lec_priv *priv) 1488static void dump_arp_table(struct lec_priv *priv)
@@ -1525,18 +1492,15 @@ static void dump_arp_table(struct lec_priv *priv)
1525 char buf[256]; 1492 char buf[256];
1526 int i, j, offset; 1493 int i, j, offset;
1527 1494
1528 printk("Dump %p:\n", priv); 1495 pr_info("Dump %p:\n", priv);
1529 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1496 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1530 hlist_for_each_entry(rulla, node, &priv->lec_arp_tables[i], next) { 1497 hlist_for_each_entry(rulla, node,
1498 &priv->lec_arp_tables[i], next) {
1531 offset = 0; 1499 offset = 0;
1532 offset += sprintf(buf, "%d: %p\n", i, rulla); 1500 offset += sprintf(buf, "%d: %p\n", i, rulla);
1533 offset += sprintf(buf + offset, "Mac:"); 1501 offset += sprintf(buf + offset, "Mac: %pM",
1534 for (j = 0; j < ETH_ALEN; j++) { 1502 rulla->mac_addr);
1535 offset += sprintf(buf + offset, 1503 offset += sprintf(buf + offset, " Atm:");
1536 "%2.2x ",
1537 rulla->mac_addr[j] & 0xff);
1538 }
1539 offset += sprintf(buf + offset, "Atm:");
1540 for (j = 0; j < ATM_ESA_LEN; j++) { 1504 for (j = 0; j < ATM_ESA_LEN; j++) {
1541 offset += sprintf(buf + offset, 1505 offset += sprintf(buf + offset,
1542 "%2.2x ", 1506 "%2.2x ",
@@ -1556,20 +1520,16 @@ static void dump_arp_table(struct lec_priv *priv)
1556 "Flags:%x, Packets_flooded:%x, Status: %s ", 1520 "Flags:%x, Packets_flooded:%x, Status: %s ",
1557 rulla->flags, rulla->packets_flooded, 1521 rulla->flags, rulla->packets_flooded,
1558 get_status_string(rulla->status)); 1522 get_status_string(rulla->status));
1559 printk("%s\n", buf); 1523 pr_info("%s\n", buf);
1560 } 1524 }
1561 } 1525 }
1562 1526
1563 if (!hlist_empty(&priv->lec_no_forward)) 1527 if (!hlist_empty(&priv->lec_no_forward))
1564 printk("No forward\n"); 1528 pr_info("No forward\n");
1565 hlist_for_each_entry(rulla, node, &priv->lec_no_forward, next) { 1529 hlist_for_each_entry(rulla, node, &priv->lec_no_forward, next) {
1566 offset = 0; 1530 offset = 0;
1567 offset += sprintf(buf + offset, "Mac:"); 1531 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
1568 for (j = 0; j < ETH_ALEN; j++) { 1532 offset += sprintf(buf + offset, " Atm:");
1569 offset += sprintf(buf + offset, "%2.2x ",
1570 rulla->mac_addr[j] & 0xff);
1571 }
1572 offset += sprintf(buf + offset, "Atm:");
1573 for (j = 0; j < ATM_ESA_LEN; j++) { 1533 for (j = 0; j < ATM_ESA_LEN; j++) {
1574 offset += sprintf(buf + offset, "%2.2x ", 1534 offset += sprintf(buf + offset, "%2.2x ",
1575 rulla->atm_addr[j] & 0xff); 1535 rulla->atm_addr[j] & 0xff);
@@ -1586,19 +1546,15 @@ static void dump_arp_table(struct lec_priv *priv)
1586 "Flags:%x, Packets_flooded:%x, Status: %s ", 1546 "Flags:%x, Packets_flooded:%x, Status: %s ",
1587 rulla->flags, rulla->packets_flooded, 1547 rulla->flags, rulla->packets_flooded,
1588 get_status_string(rulla->status)); 1548 get_status_string(rulla->status));
1589 printk("%s\n", buf); 1549 pr_info("%s\n", buf);
1590 } 1550 }
1591 1551
1592 if (!hlist_empty(&priv->lec_arp_empty_ones)) 1552 if (!hlist_empty(&priv->lec_arp_empty_ones))
1593 printk("Empty ones\n"); 1553 pr_info("Empty ones\n");
1594 hlist_for_each_entry(rulla, node, &priv->lec_arp_empty_ones, next) { 1554 hlist_for_each_entry(rulla, node, &priv->lec_arp_empty_ones, next) {
1595 offset = 0; 1555 offset = 0;
1596 offset += sprintf(buf + offset, "Mac:"); 1556 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
1597 for (j = 0; j < ETH_ALEN; j++) { 1557 offset += sprintf(buf + offset, " Atm:");
1598 offset += sprintf(buf + offset, "%2.2x ",
1599 rulla->mac_addr[j] & 0xff);
1600 }
1601 offset += sprintf(buf + offset, "Atm:");
1602 for (j = 0; j < ATM_ESA_LEN; j++) { 1558 for (j = 0; j < ATM_ESA_LEN; j++) {
1603 offset += sprintf(buf + offset, "%2.2x ", 1559 offset += sprintf(buf + offset, "%2.2x ",
1604 rulla->atm_addr[j] & 0xff); 1560 rulla->atm_addr[j] & 0xff);
@@ -1615,19 +1571,15 @@ static void dump_arp_table(struct lec_priv *priv)
1615 "Flags:%x, Packets_flooded:%x, Status: %s ", 1571 "Flags:%x, Packets_flooded:%x, Status: %s ",
1616 rulla->flags, rulla->packets_flooded, 1572 rulla->flags, rulla->packets_flooded,
1617 get_status_string(rulla->status)); 1573 get_status_string(rulla->status));
1618 printk("%s", buf); 1574 pr_info("%s", buf);
1619 } 1575 }
1620 1576
1621 if (!hlist_empty(&priv->mcast_fwds)) 1577 if (!hlist_empty(&priv->mcast_fwds))
1622 printk("Multicast Forward VCCs\n"); 1578 pr_info("Multicast Forward VCCs\n");
1623 hlist_for_each_entry(rulla, node, &priv->mcast_fwds, next) { 1579 hlist_for_each_entry(rulla, node, &priv->mcast_fwds, next) {
1624 offset = 0; 1580 offset = 0;
1625 offset += sprintf(buf + offset, "Mac:"); 1581 offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
1626 for (j = 0; j < ETH_ALEN; j++) { 1582 offset += sprintf(buf + offset, " Atm:");
1627 offset += sprintf(buf + offset, "%2.2x ",
1628 rulla->mac_addr[j] & 0xff);
1629 }
1630 offset += sprintf(buf + offset, "Atm:");
1631 for (j = 0; j < ATM_ESA_LEN; j++) { 1583 for (j = 0; j < ATM_ESA_LEN; j++) {
1632 offset += sprintf(buf + offset, "%2.2x ", 1584 offset += sprintf(buf + offset, "%2.2x ",
1633 rulla->atm_addr[j] & 0xff); 1585 rulla->atm_addr[j] & 0xff);
@@ -1644,7 +1596,7 @@ static void dump_arp_table(struct lec_priv *priv)
1644 "Flags:%x, Packets_flooded:%x, Status: %s ", 1596 "Flags:%x, Packets_flooded:%x, Status: %s ",
1645 rulla->flags, rulla->packets_flooded, 1597 rulla->flags, rulla->packets_flooded,
1646 get_status_string(rulla->status)); 1598 get_status_string(rulla->status));
1647 printk("%s\n", buf); 1599 pr_info("%s\n", buf);
1648 } 1600 }
1649 1601
1650} 1602}
@@ -1670,14 +1622,16 @@ static void lec_arp_destroy(struct lec_priv *priv)
1670 1622
1671 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1623 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1672 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1624 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1673 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { 1625 hlist_for_each_entry_safe(entry, node, next,
1626 &priv->lec_arp_tables[i], next) {
1674 lec_arp_remove(priv, entry); 1627 lec_arp_remove(priv, entry);
1675 lec_arp_put(entry); 1628 lec_arp_put(entry);
1676 } 1629 }
1677 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]); 1630 INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
1678 } 1631 }
1679 1632
1680 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { 1633 hlist_for_each_entry_safe(entry, node, next,
1634 &priv->lec_arp_empty_ones, next) {
1681 del_timer_sync(&entry->timer); 1635 del_timer_sync(&entry->timer);
1682 lec_arp_clear_vccs(entry); 1636 lec_arp_clear_vccs(entry);
1683 hlist_del(&entry->next); 1637 hlist_del(&entry->next);
@@ -1685,7 +1639,8 @@ static void lec_arp_destroy(struct lec_priv *priv)
1685 } 1639 }
1686 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones); 1640 INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
1687 1641
1688 hlist_for_each_entry_safe(entry, node, next, &priv->lec_no_forward, next) { 1642 hlist_for_each_entry_safe(entry, node, next,
1643 &priv->lec_no_forward, next) {
1689 del_timer_sync(&entry->timer); 1644 del_timer_sync(&entry->timer);
1690 lec_arp_clear_vccs(entry); 1645 lec_arp_clear_vccs(entry);
1691 hlist_del(&entry->next); 1646 hlist_del(&entry->next);
@@ -1714,15 +1669,12 @@ static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
1714 struct hlist_head *head; 1669 struct hlist_head *head;
1715 struct lec_arp_table *entry; 1670 struct lec_arp_table *entry;
1716 1671
1717 pr_debug("LEC_ARP: lec_arp_find :%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", 1672 pr_debug("%pM\n", mac_addr);
1718 mac_addr[0] & 0xff, mac_addr[1] & 0xff, mac_addr[2] & 0xff,
1719 mac_addr[3] & 0xff, mac_addr[4] & 0xff, mac_addr[5] & 0xff);
1720 1673
1721 head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])]; 1674 head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])];
1722 hlist_for_each_entry(entry, node, head, next) { 1675 hlist_for_each_entry(entry, node, head, next) {
1723 if (!compare_ether_addr(mac_addr, entry->mac_addr)) { 1676 if (!compare_ether_addr(mac_addr, entry->mac_addr))
1724 return entry; 1677 return entry;
1725 }
1726 } 1678 }
1727 return NULL; 1679 return NULL;
1728} 1680}
@@ -1734,7 +1686,7 @@ static struct lec_arp_table *make_entry(struct lec_priv *priv,
1734 1686
1735 to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC); 1687 to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC);
1736 if (!to_return) { 1688 if (!to_return) {
1737 printk("LEC: Arp entry kmalloc failed\n"); 1689 pr_info("LEC: Arp entry kmalloc failed\n");
1738 return NULL; 1690 return NULL;
1739 } 1691 }
1740 memcpy(to_return->mac_addr, mac_addr, ETH_ALEN); 1692 memcpy(to_return->mac_addr, mac_addr, ETH_ALEN);
@@ -1755,7 +1707,7 @@ static void lec_arp_expire_arp(unsigned long data)
1755 1707
1756 entry = (struct lec_arp_table *)data; 1708 entry = (struct lec_arp_table *)data;
1757 1709
1758 pr_debug("lec_arp_expire_arp\n"); 1710 pr_debug("\n");
1759 if (entry->status == ESI_ARP_PENDING) { 1711 if (entry->status == ESI_ARP_PENDING) {
1760 if (entry->no_tries <= entry->priv->max_retry_count) { 1712 if (entry->no_tries <= entry->priv->max_retry_count) {
1761 if (entry->is_rdesc) 1713 if (entry->is_rdesc)
@@ -1779,10 +1731,10 @@ static void lec_arp_expire_vcc(unsigned long data)
1779 1731
1780 del_timer(&to_remove->timer); 1732 del_timer(&to_remove->timer);
1781 1733
1782 pr_debug("LEC_ARP %p %p: lec_arp_expire_vcc vpi:%d vci:%d\n", 1734 pr_debug("%p %p: vpi:%d vci:%d\n",
1783 to_remove, priv, 1735 to_remove, priv,
1784 to_remove->vcc ? to_remove->recv_vcc->vpi : 0, 1736 to_remove->vcc ? to_remove->recv_vcc->vpi : 0,
1785 to_remove->vcc ? to_remove->recv_vcc->vci : 0); 1737 to_remove->vcc ? to_remove->recv_vcc->vci : 0);
1786 1738
1787 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1739 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1788 hlist_del(&to_remove->next); 1740 hlist_del(&to_remove->next);
@@ -1792,6 +1744,50 @@ static void lec_arp_expire_vcc(unsigned long data)
1792 lec_arp_put(to_remove); 1744 lec_arp_put(to_remove);
1793} 1745}
1794 1746
1747static bool __lec_arp_check_expire(struct lec_arp_table *entry,
1748 unsigned long now,
1749 struct lec_priv *priv)
1750{
1751 unsigned long time_to_check;
1752
1753 if ((entry->flags) & LEC_REMOTE_FLAG && priv->topology_change)
1754 time_to_check = priv->forward_delay_time;
1755 else
1756 time_to_check = priv->aging_time;
1757
1758 pr_debug("About to expire: %lx - %lx > %lx\n",
1759 now, entry->last_used, time_to_check);
1760 if (time_after(now, entry->last_used + time_to_check) &&
1761 !(entry->flags & LEC_PERMANENT_FLAG) &&
1762 !(entry->mac_addr[0] & 0x01)) { /* LANE2: 7.1.20 */
1763 /* Remove entry */
1764 pr_debug("Entry timed out\n");
1765 lec_arp_remove(priv, entry);
1766 lec_arp_put(entry);
1767 } else {
1768 /* Something else */
1769 if ((entry->status == ESI_VC_PENDING ||
1770 entry->status == ESI_ARP_PENDING) &&
1771 time_after_eq(now, entry->timestamp +
1772 priv->max_unknown_frame_time)) {
1773 entry->timestamp = jiffies;
1774 entry->packets_flooded = 0;
1775 if (entry->status == ESI_VC_PENDING)
1776 send_to_lecd(priv, l_svc_setup,
1777 entry->mac_addr,
1778 entry->atm_addr,
1779 NULL);
1780 }
1781 if (entry->status == ESI_FLUSH_PENDING &&
1782 time_after_eq(now, entry->timestamp +
1783 priv->path_switching_delay)) {
1784 lec_arp_hold(entry);
1785 return true;
1786 }
1787 }
1788
1789 return false;
1790}
1795/* 1791/*
1796 * Expire entries. 1792 * Expire entries.
1797 * 1. Re-set timer 1793 * 1. Re-set timer
@@ -1816,62 +1812,28 @@ static void lec_arp_check_expire(struct work_struct *work)
1816 struct hlist_node *node, *next; 1812 struct hlist_node *node, *next;
1817 struct lec_arp_table *entry; 1813 struct lec_arp_table *entry;
1818 unsigned long now; 1814 unsigned long now;
1819 unsigned long time_to_check;
1820 int i; 1815 int i;
1821 1816
1822 pr_debug("lec_arp_check_expire %p\n", priv); 1817 pr_debug("%p\n", priv);
1823 now = jiffies; 1818 now = jiffies;
1824restart: 1819restart:
1825 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1820 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1826 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1821 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1827 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { 1822 hlist_for_each_entry_safe(entry, node, next,
1828 if ((entry->flags) & LEC_REMOTE_FLAG && 1823 &priv->lec_arp_tables[i], next) {
1829 priv->topology_change) 1824 if (__lec_arp_check_expire(entry, now, priv)) {
1830 time_to_check = priv->forward_delay_time; 1825 struct sk_buff *skb;
1831 else 1826 struct atm_vcc *vcc = entry->vcc;
1832 time_to_check = priv->aging_time; 1827
1833 1828 spin_unlock_irqrestore(&priv->lec_arp_lock,
1834 pr_debug("About to expire: %lx - %lx > %lx\n", 1829 flags);
1835 now, entry->last_used, time_to_check); 1830 while ((skb = skb_dequeue(&entry->tx_wait)))
1836 if (time_after(now, entry->last_used + time_to_check) 1831 lec_send(vcc, skb);
1837 && !(entry->flags & LEC_PERMANENT_FLAG) 1832 entry->last_used = jiffies;
1838 && !(entry->mac_addr[0] & 0x01)) { /* LANE2: 7.1.20 */ 1833 entry->status = ESI_FORWARD_DIRECT;
1839 /* Remove entry */
1840 pr_debug("LEC:Entry timed out\n");
1841 lec_arp_remove(priv, entry);
1842 lec_arp_put(entry); 1834 lec_arp_put(entry);
1843 } else { 1835
1844 /* Something else */ 1836 goto restart;
1845 if ((entry->status == ESI_VC_PENDING ||
1846 entry->status == ESI_ARP_PENDING)
1847 && time_after_eq(now,
1848 entry->timestamp +
1849 priv->
1850 max_unknown_frame_time)) {
1851 entry->timestamp = jiffies;
1852 entry->packets_flooded = 0;
1853 if (entry->status == ESI_VC_PENDING)
1854 send_to_lecd(priv, l_svc_setup,
1855 entry->mac_addr,
1856 entry->atm_addr,
1857 NULL);
1858 }
1859 if (entry->status == ESI_FLUSH_PENDING
1860 &&
1861 time_after_eq(now, entry->timestamp +
1862 priv->path_switching_delay)) {
1863 struct sk_buff *skb;
1864 struct atm_vcc *vcc = entry->vcc;
1865
1866 lec_arp_hold(entry);
1867 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
1868 while ((skb = skb_dequeue(&entry->tx_wait)) != NULL)
1869 lec_send(vcc, skb);
1870 entry->last_used = jiffies;
1871 entry->status = ESI_FORWARD_DIRECT;
1872 lec_arp_put(entry);
1873 goto restart;
1874 }
1875 } 1837 }
1876 } 1838 }
1877 } 1839 }
@@ -1885,7 +1847,8 @@ restart:
1885 * 1847 *
1886 */ 1848 */
1887static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv, 1849static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1888 const unsigned char *mac_to_find, int is_rdesc, 1850 const unsigned char *mac_to_find,
1851 int is_rdesc,
1889 struct lec_arp_table **ret_entry) 1852 struct lec_arp_table **ret_entry)
1890{ 1853{
1891 unsigned long flags; 1854 unsigned long flags;
@@ -1921,9 +1884,8 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1921 * If the LE_ARP cache entry is still pending, reset count to 0 1884 * If the LE_ARP cache entry is still pending, reset count to 0
1922 * so another LE_ARP request can be made for this frame. 1885 * so another LE_ARP request can be made for this frame.
1923 */ 1886 */
1924 if (entry->status == ESI_ARP_PENDING) { 1887 if (entry->status == ESI_ARP_PENDING)
1925 entry->no_tries = 0; 1888 entry->no_tries = 0;
1926 }
1927 /* 1889 /*
1928 * Data direct VC not yet set up, check to see if the unknown 1890 * Data direct VC not yet set up, check to see if the unknown
1929 * frame count is greater than the limit. If the limit has 1891 * frame count is greater than the limit. If the limit has
@@ -1934,7 +1896,7 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1934 entry->packets_flooded < 1896 entry->packets_flooded <
1935 priv->maximum_unknown_frame_count) { 1897 priv->maximum_unknown_frame_count) {
1936 entry->packets_flooded++; 1898 entry->packets_flooded++;
1937 pr_debug("LEC_ARP: Flooding..\n"); 1899 pr_debug("Flooding..\n");
1938 found = priv->mcast_vcc; 1900 found = priv->mcast_vcc;
1939 goto out; 1901 goto out;
1940 } 1902 }
@@ -1945,13 +1907,13 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1945 */ 1907 */
1946 lec_arp_hold(entry); 1908 lec_arp_hold(entry);
1947 *ret_entry = entry; 1909 *ret_entry = entry;
1948 pr_debug("lec: entry->status %d entry->vcc %p\n", entry->status, 1910 pr_debug("entry->status %d entry->vcc %p\n", entry->status,
1949 entry->vcc); 1911 entry->vcc);
1950 found = NULL; 1912 found = NULL;
1951 } else { 1913 } else {
1952 /* No matching entry was found */ 1914 /* No matching entry was found */
1953 entry = make_entry(priv, mac_to_find); 1915 entry = make_entry(priv, mac_to_find);
1954 pr_debug("LEC_ARP: Making entry\n"); 1916 pr_debug("Making entry\n");
1955 if (!entry) { 1917 if (!entry) {
1956 found = priv->mcast_vcc; 1918 found = priv->mcast_vcc;
1957 goto out; 1919 goto out;
@@ -1988,13 +1950,14 @@ lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr,
1988 struct lec_arp_table *entry; 1950 struct lec_arp_table *entry;
1989 int i; 1951 int i;
1990 1952
1991 pr_debug("lec_addr_delete\n"); 1953 pr_debug("\n");
1992 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1954 spin_lock_irqsave(&priv->lec_arp_lock, flags);
1993 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 1955 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
1994 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { 1956 hlist_for_each_entry_safe(entry, node, next,
1995 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) 1957 &priv->lec_arp_tables[i], next) {
1996 && (permanent || 1958 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) &&
1997 !(entry->flags & LEC_PERMANENT_FLAG))) { 1959 (permanent ||
1960 !(entry->flags & LEC_PERMANENT_FLAG))) {
1998 lec_arp_remove(priv, entry); 1961 lec_arp_remove(priv, entry);
1999 lec_arp_put(entry); 1962 lec_arp_put(entry);
2000 } 1963 }
@@ -2019,10 +1982,8 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
2019 struct lec_arp_table *entry, *tmp; 1982 struct lec_arp_table *entry, *tmp;
2020 int i; 1983 int i;
2021 1984
2022 pr_debug("lec:%s", (targetless_le_arp) ? "targetless " : " "); 1985 pr_debug("%smac:%pM\n",
2023 pr_debug("lec_arp_update mac:%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", 1986 (targetless_le_arp) ? "targetless " : "", mac_addr);
2024 mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
2025 mac_addr[4], mac_addr[5]);
2026 1987
2027 spin_lock_irqsave(&priv->lec_arp_lock, flags); 1988 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2028 entry = lec_arp_find(priv, mac_addr); 1989 entry = lec_arp_find(priv, mac_addr);
@@ -2032,7 +1993,8 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
2032 * we have no entry in the cache. 7.1.30 1993 * we have no entry in the cache. 7.1.30
2033 */ 1994 */
2034 if (!hlist_empty(&priv->lec_arp_empty_ones)) { 1995 if (!hlist_empty(&priv->lec_arp_empty_ones)) {
2035 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { 1996 hlist_for_each_entry_safe(entry, node, next,
1997 &priv->lec_arp_empty_ones, next) {
2036 if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) { 1998 if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) {
2037 hlist_del(&entry->next); 1999 hlist_del(&entry->next);
2038 del_timer(&entry->timer); 2000 del_timer(&entry->timer);
@@ -2076,7 +2038,8 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
2076 memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN); 2038 memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN);
2077 del_timer(&entry->timer); 2039 del_timer(&entry->timer);
2078 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2040 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2079 hlist_for_each_entry(tmp, node, &priv->lec_arp_tables[i], next) { 2041 hlist_for_each_entry(tmp, node,
2042 &priv->lec_arp_tables[i], next) {
2080 if (entry != tmp && 2043 if (entry != tmp &&
2081 !memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) { 2044 !memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) {
2082 /* Vcc to this host exists */ 2045 /* Vcc to this host exists */
@@ -2121,14 +2084,13 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
2121 int i, found_entry = 0; 2084 int i, found_entry = 0;
2122 2085
2123 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2086 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2087 /* Vcc for Multicast Forward. No timer, LANEv2 7.1.20 and 2.3.5.3 */
2124 if (ioc_data->receive == 2) { 2088 if (ioc_data->receive == 2) {
2125 /* Vcc for Multicast Forward. No timer, LANEv2 7.1.20 and 2.3.5.3 */
2126
2127 pr_debug("LEC_ARP: Attaching mcast forward\n"); 2089 pr_debug("LEC_ARP: Attaching mcast forward\n");
2128#if 0 2090#if 0
2129 entry = lec_arp_find(priv, bus_mac); 2091 entry = lec_arp_find(priv, bus_mac);
2130 if (!entry) { 2092 if (!entry) {
2131 printk("LEC_ARP: Multicast entry not found!\n"); 2093 pr_info("LEC_ARP: Multicast entry not found!\n");
2132 goto out; 2094 goto out;
2133 } 2095 }
2134 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN); 2096 memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
@@ -2149,19 +2111,17 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
2149 * Vcc which we don't want to make default vcc, 2111 * Vcc which we don't want to make default vcc,
2150 * attach it anyway. 2112 * attach it anyway.
2151 */ 2113 */
2152 pr_debug 2114 pr_debug("LEC_ARP:Attaching data direct, not default: %2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n",
2153 ("LEC_ARP:Attaching data direct, not default: " 2115 ioc_data->atm_addr[0], ioc_data->atm_addr[1],
2154 "%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", 2116 ioc_data->atm_addr[2], ioc_data->atm_addr[3],
2155 ioc_data->atm_addr[0], ioc_data->atm_addr[1], 2117 ioc_data->atm_addr[4], ioc_data->atm_addr[5],
2156 ioc_data->atm_addr[2], ioc_data->atm_addr[3], 2118 ioc_data->atm_addr[6], ioc_data->atm_addr[7],
2157 ioc_data->atm_addr[4], ioc_data->atm_addr[5], 2119 ioc_data->atm_addr[8], ioc_data->atm_addr[9],
2158 ioc_data->atm_addr[6], ioc_data->atm_addr[7], 2120 ioc_data->atm_addr[10], ioc_data->atm_addr[11],
2159 ioc_data->atm_addr[8], ioc_data->atm_addr[9], 2121 ioc_data->atm_addr[12], ioc_data->atm_addr[13],
2160 ioc_data->atm_addr[10], ioc_data->atm_addr[11], 2122 ioc_data->atm_addr[14], ioc_data->atm_addr[15],
2161 ioc_data->atm_addr[12], ioc_data->atm_addr[13], 2123 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
2162 ioc_data->atm_addr[14], ioc_data->atm_addr[15], 2124 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
2163 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
2164 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
2165 entry = make_entry(priv, bus_mac); 2125 entry = make_entry(priv, bus_mac);
2166 if (entry == NULL) 2126 if (entry == NULL)
2167 goto out; 2127 goto out;
@@ -2177,29 +2137,28 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
2177 dump_arp_table(priv); 2137 dump_arp_table(priv);
2178 goto out; 2138 goto out;
2179 } 2139 }
2180 pr_debug 2140 pr_debug("LEC_ARP:Attaching data direct, default: %2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n",
2181 ("LEC_ARP:Attaching data direct, default: " 2141 ioc_data->atm_addr[0], ioc_data->atm_addr[1],
2182 "%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", 2142 ioc_data->atm_addr[2], ioc_data->atm_addr[3],
2183 ioc_data->atm_addr[0], ioc_data->atm_addr[1], 2143 ioc_data->atm_addr[4], ioc_data->atm_addr[5],
2184 ioc_data->atm_addr[2], ioc_data->atm_addr[3], 2144 ioc_data->atm_addr[6], ioc_data->atm_addr[7],
2185 ioc_data->atm_addr[4], ioc_data->atm_addr[5], 2145 ioc_data->atm_addr[8], ioc_data->atm_addr[9],
2186 ioc_data->atm_addr[6], ioc_data->atm_addr[7], 2146 ioc_data->atm_addr[10], ioc_data->atm_addr[11],
2187 ioc_data->atm_addr[8], ioc_data->atm_addr[9], 2147 ioc_data->atm_addr[12], ioc_data->atm_addr[13],
2188 ioc_data->atm_addr[10], ioc_data->atm_addr[11], 2148 ioc_data->atm_addr[14], ioc_data->atm_addr[15],
2189 ioc_data->atm_addr[12], ioc_data->atm_addr[13], 2149 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
2190 ioc_data->atm_addr[14], ioc_data->atm_addr[15], 2150 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
2191 ioc_data->atm_addr[16], ioc_data->atm_addr[17],
2192 ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
2193 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2151 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2194 hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { 2152 hlist_for_each_entry(entry, node,
2153 &priv->lec_arp_tables[i], next) {
2195 if (memcmp 2154 if (memcmp
2196 (ioc_data->atm_addr, entry->atm_addr, 2155 (ioc_data->atm_addr, entry->atm_addr,
2197 ATM_ESA_LEN) == 0) { 2156 ATM_ESA_LEN) == 0) {
2198 pr_debug("LEC_ARP: Attaching data direct\n"); 2157 pr_debug("LEC_ARP: Attaching data direct\n");
2199 pr_debug("Currently -> Vcc: %d, Rvcc:%d\n", 2158 pr_debug("Currently -> Vcc: %d, Rvcc:%d\n",
2200 entry->vcc ? entry->vcc->vci : 0, 2159 entry->vcc ? entry->vcc->vci : 0,
2201 entry->recv_vcc ? entry->recv_vcc-> 2160 entry->recv_vcc ? entry->recv_vcc->
2202 vci : 0); 2161 vci : 0);
2203 found_entry = 1; 2162 found_entry = 1;
2204 del_timer(&entry->timer); 2163 del_timer(&entry->timer);
2205 entry->vcc = vcc; 2164 entry->vcc = vcc;
@@ -2271,19 +2230,21 @@ static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
2271 struct lec_arp_table *entry; 2230 struct lec_arp_table *entry;
2272 int i; 2231 int i;
2273 2232
2274 pr_debug("LEC:lec_flush_complete %lx\n", tran_id); 2233 pr_debug("%lx\n", tran_id);
2275restart: 2234restart:
2276 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2235 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2277 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2236 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2278 hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { 2237 hlist_for_each_entry(entry, node,
2279 if (entry->flush_tran_id == tran_id 2238 &priv->lec_arp_tables[i], next) {
2280 && entry->status == ESI_FLUSH_PENDING) { 2239 if (entry->flush_tran_id == tran_id &&
2240 entry->status == ESI_FLUSH_PENDING) {
2281 struct sk_buff *skb; 2241 struct sk_buff *skb;
2282 struct atm_vcc *vcc = entry->vcc; 2242 struct atm_vcc *vcc = entry->vcc;
2283 2243
2284 lec_arp_hold(entry); 2244 lec_arp_hold(entry);
2285 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 2245 spin_unlock_irqrestore(&priv->lec_arp_lock,
2286 while ((skb = skb_dequeue(&entry->tx_wait)) != NULL) 2246 flags);
2247 while ((skb = skb_dequeue(&entry->tx_wait)))
2287 lec_send(vcc, skb); 2248 lec_send(vcc, skb);
2288 entry->last_used = jiffies; 2249 entry->last_used = jiffies;
2289 entry->status = ESI_FORWARD_DIRECT; 2250 entry->status = ESI_FORWARD_DIRECT;
@@ -2308,11 +2269,12 @@ lec_set_flush_tran_id(struct lec_priv *priv,
2308 2269
2309 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2270 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2310 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) 2271 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
2311 hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { 2272 hlist_for_each_entry(entry, node,
2273 &priv->lec_arp_tables[i], next) {
2312 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) { 2274 if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) {
2313 entry->flush_tran_id = tran_id; 2275 entry->flush_tran_id = tran_id;
2314 pr_debug("Set flush transaction id to %lx for %p\n", 2276 pr_debug("Set flush transaction id to %lx for %p\n",
2315 tran_id, entry); 2277 tran_id, entry);
2316 } 2278 }
2317 } 2279 }
2318 spin_unlock_irqrestore(&priv->lec_arp_lock, flags); 2280 spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
@@ -2328,7 +2290,8 @@ static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc)
2328 struct lec_vcc_priv *vpriv; 2290 struct lec_vcc_priv *vpriv;
2329 int err = 0; 2291 int err = 0;
2330 2292
2331 if (!(vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL))) 2293 vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
2294 if (!vpriv)
2332 return -ENOMEM; 2295 return -ENOMEM;
2333 vpriv->xoff = 0; 2296 vpriv->xoff = 0;
2334 vpriv->old_pop = vcc->pop; 2297 vpriv->old_pop = vcc->pop;
@@ -2368,18 +2331,19 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
2368 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2331 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2369 2332
2370 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { 2333 for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
2371 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { 2334 hlist_for_each_entry_safe(entry, node, next,
2335 &priv->lec_arp_tables[i], next) {
2372 if (vcc == entry->vcc) { 2336 if (vcc == entry->vcc) {
2373 lec_arp_remove(priv, entry); 2337 lec_arp_remove(priv, entry);
2374 lec_arp_put(entry); 2338 lec_arp_put(entry);
2375 if (priv->mcast_vcc == vcc) { 2339 if (priv->mcast_vcc == vcc)
2376 priv->mcast_vcc = NULL; 2340 priv->mcast_vcc = NULL;
2377 }
2378 } 2341 }
2379 } 2342 }
2380 } 2343 }
2381 2344
2382 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { 2345 hlist_for_each_entry_safe(entry, node, next,
2346 &priv->lec_arp_empty_ones, next) {
2383 if (entry->vcc == vcc) { 2347 if (entry->vcc == vcc) {
2384 lec_arp_clear_vccs(entry); 2348 lec_arp_clear_vccs(entry);
2385 del_timer(&entry->timer); 2349 del_timer(&entry->timer);
@@ -2388,7 +2352,8 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
2388 } 2352 }
2389 } 2353 }
2390 2354
2391 hlist_for_each_entry_safe(entry, node, next, &priv->lec_no_forward, next) { 2355 hlist_for_each_entry_safe(entry, node, next,
2356 &priv->lec_no_forward, next) {
2392 if (entry->recv_vcc == vcc) { 2357 if (entry->recv_vcc == vcc) {
2393 lec_arp_clear_vccs(entry); 2358 lec_arp_clear_vccs(entry);
2394 del_timer(&entry->timer); 2359 del_timer(&entry->timer);
@@ -2429,14 +2394,16 @@ lec_arp_check_empties(struct lec_priv *priv,
2429 src = hdr->h_source; 2394 src = hdr->h_source;
2430 2395
2431 spin_lock_irqsave(&priv->lec_arp_lock, flags); 2396 spin_lock_irqsave(&priv->lec_arp_lock, flags);
2432 hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) { 2397 hlist_for_each_entry_safe(entry, node, next,
2398 &priv->lec_arp_empty_ones, next) {
2433 if (vcc == entry->vcc) { 2399 if (vcc == entry->vcc) {
2434 del_timer(&entry->timer); 2400 del_timer(&entry->timer);
2435 memcpy(entry->mac_addr, src, ETH_ALEN); 2401 memcpy(entry->mac_addr, src, ETH_ALEN);
2436 entry->status = ESI_FORWARD_DIRECT; 2402 entry->status = ESI_FORWARD_DIRECT;
2437 entry->last_used = jiffies; 2403 entry->last_used = jiffies;
2438 /* We might have got an entry */ 2404 /* We might have got an entry */
2439 if ((tmp = lec_arp_find(priv, src))) { 2405 tmp = lec_arp_find(priv, src);
2406 if (tmp) {
2440 lec_arp_remove(priv, tmp); 2407 lec_arp_remove(priv, tmp);
2441 lec_arp_put(tmp); 2408 lec_arp_put(tmp);
2442 } 2409 }
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 38a6cb0863f..a6521c8aa88 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -1,3 +1,5 @@
1#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
2
1#include <linux/kernel.h> 3#include <linux/kernel.h>
2#include <linux/string.h> 4#include <linux/string.h>
3#include <linux/timer.h> 5#include <linux/timer.h>
@@ -13,8 +15,8 @@
13#include <net/sock.h> 15#include <net/sock.h>
14#include <linux/skbuff.h> 16#include <linux/skbuff.h>
15#include <linux/ip.h> 17#include <linux/ip.h>
18#include <linux/uaccess.h>
16#include <asm/byteorder.h> 19#include <asm/byteorder.h>
17#include <asm/uaccess.h>
18#include <net/checksum.h> /* for ip_fast_csum() */ 20#include <net/checksum.h> /* for ip_fast_csum() */
19#include <net/arp.h> 21#include <net/arp.h>
20#include <net/dst.h> 22#include <net/dst.h>
@@ -36,31 +38,47 @@
36 */ 38 */
37 39
38#if 0 40#if 0
39#define dprintk printk /* debug */ 41#define dprintk(format, args...) \
42 printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args)
43#define dprintk_cont(format, args...) printk(KERN_CONT format, ##args)
40#else 44#else
41#define dprintk(format,args...) 45#define dprintk(format, args...) \
46 do { if (0) \
47 printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args);\
48 } while (0)
49#define dprintk_cont(format, args...) \
50 do { if (0) printk(KERN_CONT format, ##args); } while (0)
42#endif 51#endif
43 52
44#if 0 53#if 0
45#define ddprintk printk /* more debug */ 54#define ddprintk(format, args...) \
55 printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args)
56#define ddprintk_cont(format, args...) printk(KERN_CONT format, ##args)
46#else 57#else
47#define ddprintk(format,args...) 58#define ddprintk(format, args...) \
59 do { if (0) \
60 printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args);\
61 } while (0)
62#define ddprintk_cont(format, args...) \
63 do { if (0) printk(KERN_CONT format, ##args); } while (0)
48#endif 64#endif
49 65
50
51
52#define MPOA_TAG_LEN 4 66#define MPOA_TAG_LEN 4
53 67
54/* mpc_daemon -> kernel */ 68/* mpc_daemon -> kernel */
55static void MPOA_trigger_rcvd (struct k_message *msg, struct mpoa_client *mpc); 69static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc);
56static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc); 70static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc);
57static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc); 71static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc);
58static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc); 72static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc);
59static void mps_death(struct k_message *msg, struct mpoa_client *mpc); 73static void mps_death(struct k_message *msg, struct mpoa_client *mpc);
60static void clean_up(struct k_message *msg, struct mpoa_client *mpc, int action); 74static void clean_up(struct k_message *msg, struct mpoa_client *mpc,
61static void MPOA_cache_impos_rcvd(struct k_message *msg, struct mpoa_client *mpc); 75 int action);
62static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, struct mpoa_client *mpc); 76static void MPOA_cache_impos_rcvd(struct k_message *msg,
63static void set_mps_mac_addr_rcvd(struct k_message *mesg, struct mpoa_client *mpc); 77 struct mpoa_client *mpc);
78static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg,
79 struct mpoa_client *mpc);
80static void set_mps_mac_addr_rcvd(struct k_message *mesg,
81 struct mpoa_client *mpc);
64 82
65static const uint8_t *copy_macs(struct mpoa_client *mpc, 83static const uint8_t *copy_macs(struct mpoa_client *mpc,
66 const uint8_t *router_mac, 84 const uint8_t *router_mac,
@@ -74,10 +92,11 @@ static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb);
74 92
75static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb); 93static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb);
76static netdev_tx_t mpc_send_packet(struct sk_buff *skb, 94static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
77 struct net_device *dev); 95 struct net_device *dev);
78static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned long event, void *dev); 96static int mpoa_event_listener(struct notifier_block *mpoa_notifier,
97 unsigned long event, void *dev);
79static void mpc_timer_refresh(void); 98static void mpc_timer_refresh(void);
80static void mpc_cache_check( unsigned long checking_time ); 99static void mpc_cache_check(unsigned long checking_time);
81 100
82static struct llc_snap_hdr llc_snap_mpoa_ctrl = { 101static struct llc_snap_hdr llc_snap_mpoa_ctrl = {
83 0xaa, 0xaa, 0x03, 102 0xaa, 0xaa, 0x03,
@@ -167,7 +186,7 @@ struct atm_mpoa_qos *atm_mpoa_add_qos(__be32 dst_ip, struct atm_qos *qos)
167 186
168 entry = kmalloc(sizeof(struct atm_mpoa_qos), GFP_KERNEL); 187 entry = kmalloc(sizeof(struct atm_mpoa_qos), GFP_KERNEL);
169 if (entry == NULL) { 188 if (entry == NULL) {
170 printk("mpoa: atm_mpoa_add_qos: out of memory\n"); 189 pr_info("mpoa: out of memory\n");
171 return entry; 190 return entry;
172 } 191 }
173 192
@@ -185,10 +204,9 @@ struct atm_mpoa_qos *atm_mpoa_search_qos(__be32 dst_ip)
185 struct atm_mpoa_qos *qos; 204 struct atm_mpoa_qos *qos;
186 205
187 qos = qos_head; 206 qos = qos_head;
188 while( qos != NULL ){ 207 while (qos) {
189 if(qos->ipaddr == dst_ip) { 208 if (qos->ipaddr == dst_ip)
190 break; 209 break;
191 }
192 qos = qos->next; 210 qos = qos->next;
193 } 211 }
194 212
@@ -200,10 +218,10 @@ struct atm_mpoa_qos *atm_mpoa_search_qos(__be32 dst_ip)
200 */ 218 */
201int atm_mpoa_delete_qos(struct atm_mpoa_qos *entry) 219int atm_mpoa_delete_qos(struct atm_mpoa_qos *entry)
202{ 220{
203
204 struct atm_mpoa_qos *curr; 221 struct atm_mpoa_qos *curr;
205 222
206 if (entry == NULL) return 0; 223 if (entry == NULL)
224 return 0;
207 if (entry == qos_head) { 225 if (entry == qos_head) {
208 qos_head = qos_head->next; 226 qos_head = qos_head->next;
209 kfree(entry); 227 kfree(entry);
@@ -234,9 +252,17 @@ void atm_mpoa_disp_qos(struct seq_file *m)
234 252
235 while (qos != NULL) { 253 while (qos != NULL) {
236 seq_printf(m, "%pI4\n %-7d %-7d %-7d %-7d %-7d\n %-7d %-7d %-7d %-7d %-7d\n", 254 seq_printf(m, "%pI4\n %-7d %-7d %-7d %-7d %-7d\n %-7d %-7d %-7d %-7d %-7d\n",
237 &qos->ipaddr, 255 &qos->ipaddr,
238 qos->qos.txtp.max_pcr, qos->qos.txtp.pcr, qos->qos.txtp.min_pcr, qos->qos.txtp.max_cdv, qos->qos.txtp.max_sdu, 256 qos->qos.txtp.max_pcr,
239 qos->qos.rxtp.max_pcr, qos->qos.rxtp.pcr, qos->qos.rxtp.min_pcr, qos->qos.rxtp.max_cdv, qos->qos.rxtp.max_sdu); 257 qos->qos.txtp.pcr,
258 qos->qos.txtp.min_pcr,
259 qos->qos.txtp.max_cdv,
260 qos->qos.txtp.max_sdu,
261 qos->qos.rxtp.max_pcr,
262 qos->qos.rxtp.pcr,
263 qos->qos.rxtp.min_pcr,
264 qos->qos.rxtp.max_cdv,
265 qos->qos.rxtp.max_sdu);
240 qos = qos->next; 266 qos = qos->next;
241 } 267 }
242} 268}
@@ -256,7 +282,7 @@ static struct mpoa_client *alloc_mpc(void)
256{ 282{
257 struct mpoa_client *mpc; 283 struct mpoa_client *mpc;
258 284
259 mpc = kzalloc(sizeof (struct mpoa_client), GFP_KERNEL); 285 mpc = kzalloc(sizeof(struct mpoa_client), GFP_KERNEL);
260 if (mpc == NULL) 286 if (mpc == NULL)
261 return NULL; 287 return NULL;
262 rwlock_init(&mpc->ingress_lock); 288 rwlock_init(&mpc->ingress_lock);
@@ -266,7 +292,7 @@ static struct mpoa_client *alloc_mpc(void)
266 292
267 mpc->parameters.mpc_p1 = MPC_P1; 293 mpc->parameters.mpc_p1 = MPC_P1;
268 mpc->parameters.mpc_p2 = MPC_P2; 294 mpc->parameters.mpc_p2 = MPC_P2;
269 memset(mpc->parameters.mpc_p3,0,sizeof(mpc->parameters.mpc_p3)); 295 memset(mpc->parameters.mpc_p3, 0, sizeof(mpc->parameters.mpc_p3));
270 mpc->parameters.mpc_p4 = MPC_P4; 296 mpc->parameters.mpc_p4 = MPC_P4;
271 mpc->parameters.mpc_p5 = MPC_P5; 297 mpc->parameters.mpc_p5 = MPC_P5;
272 mpc->parameters.mpc_p6 = MPC_P6; 298 mpc->parameters.mpc_p6 = MPC_P6;
@@ -286,9 +312,9 @@ static struct mpoa_client *alloc_mpc(void)
286static void start_mpc(struct mpoa_client *mpc, struct net_device *dev) 312static void start_mpc(struct mpoa_client *mpc, struct net_device *dev)
287{ 313{
288 314
289 dprintk("mpoa: (%s) start_mpc:\n", mpc->dev->name); 315 dprintk("(%s)\n", mpc->dev->name);
290 if (!dev->netdev_ops) 316 if (!dev->netdev_ops)
291 printk("mpoa: (%s) start_mpc not starting\n", dev->name); 317 pr_info("(%s) not starting\n", dev->name);
292 else { 318 else {
293 mpc->old_ops = dev->netdev_ops; 319 mpc->old_ops = dev->netdev_ops;
294 mpc->new_ops = *mpc->old_ops; 320 mpc->new_ops = *mpc->old_ops;
@@ -300,14 +326,14 @@ static void start_mpc(struct mpoa_client *mpc, struct net_device *dev)
300static void stop_mpc(struct mpoa_client *mpc) 326static void stop_mpc(struct mpoa_client *mpc)
301{ 327{
302 struct net_device *dev = mpc->dev; 328 struct net_device *dev = mpc->dev;
303 dprintk("mpoa: (%s) stop_mpc:", mpc->dev->name); 329 dprintk("(%s)", mpc->dev->name);
304 330
305 /* Lets not nullify lec device's dev->hard_start_xmit */ 331 /* Lets not nullify lec device's dev->hard_start_xmit */
306 if (dev->netdev_ops != &mpc->new_ops) { 332 if (dev->netdev_ops != &mpc->new_ops) {
307 dprintk(" mpc already stopped, not fatal\n"); 333 dprintk_cont(" mpc already stopped, not fatal\n");
308 return; 334 return;
309 } 335 }
310 dprintk("\n"); 336 dprintk_cont("\n");
311 337
312 dev->netdev_ops = mpc->old_ops; 338 dev->netdev_ops = mpc->old_ops;
313 mpc->old_ops = NULL; 339 mpc->old_ops = NULL;
@@ -319,25 +345,18 @@ static const char *mpoa_device_type_string(char type) __attribute__ ((unused));
319 345
320static const char *mpoa_device_type_string(char type) 346static const char *mpoa_device_type_string(char type)
321{ 347{
322 switch(type) { 348 switch (type) {
323 case NON_MPOA: 349 case NON_MPOA:
324 return "non-MPOA device"; 350 return "non-MPOA device";
325 break;
326 case MPS: 351 case MPS:
327 return "MPS"; 352 return "MPS";
328 break;
329 case MPC: 353 case MPC:
330 return "MPC"; 354 return "MPC";
331 break;
332 case MPS_AND_MPC: 355 case MPS_AND_MPC:
333 return "both MPS and MPC"; 356 return "both MPS and MPC";
334 break;
335 default:
336 return "unspecified (non-MPOA) device";
337 break;
338 } 357 }
339 358
340 return ""; /* not reached */ 359 return "unspecified (non-MPOA) device";
341} 360}
342 361
343/* 362/*
@@ -362,26 +381,28 @@ static void lane2_assoc_ind(struct net_device *dev, const u8 *mac_addr,
362 struct mpoa_client *mpc; 381 struct mpoa_client *mpc;
363 382
364 mpoa_device_type = number_of_mps_macs = 0; /* silence gcc */ 383 mpoa_device_type = number_of_mps_macs = 0; /* silence gcc */
365 dprintk("mpoa: (%s) lane2_assoc_ind: received TLV(s), ", dev->name); 384 dprintk("(%s) received TLV(s), ", dev->name);
366 dprintk("total length of all TLVs %d\n", sizeoftlvs); 385 dprintk("total length of all TLVs %d\n", sizeoftlvs);
367 mpc = find_mpc_by_lec(dev); /* Sampo-Fix: moved here from below */ 386 mpc = find_mpc_by_lec(dev); /* Sampo-Fix: moved here from below */
368 if (mpc == NULL) { 387 if (mpc == NULL) {
369 printk("mpoa: (%s) lane2_assoc_ind: no mpc\n", dev->name); 388 pr_info("(%s) no mpc\n", dev->name);
370 return; 389 return;
371 } 390 }
372 end_of_tlvs = tlvs + sizeoftlvs; 391 end_of_tlvs = tlvs + sizeoftlvs;
373 while (end_of_tlvs - tlvs >= 5) { 392 while (end_of_tlvs - tlvs >= 5) {
374 type = (tlvs[0] << 24) | (tlvs[1] << 16) | (tlvs[2] << 8) | tlvs[3]; 393 type = ((tlvs[0] << 24) | (tlvs[1] << 16) |
394 (tlvs[2] << 8) | tlvs[3]);
375 length = tlvs[4]; 395 length = tlvs[4];
376 tlvs += 5; 396 tlvs += 5;
377 dprintk(" type 0x%x length %02x\n", type, length); 397 dprintk(" type 0x%x length %02x\n", type, length);
378 if (tlvs + length > end_of_tlvs) { 398 if (tlvs + length > end_of_tlvs) {
379 printk("TLV value extends past its buffer, aborting parse\n"); 399 pr_info("TLV value extends past its buffer, aborting parse\n");
380 return; 400 return;
381 } 401 }
382 402
383 if (type == 0) { 403 if (type == 0) {
384 printk("mpoa: (%s) lane2_assoc_ind: TLV type was 0, returning\n", dev->name); 404 pr_info("mpoa: (%s) TLV type was 0, returning\n",
405 dev->name);
385 return; 406 return;
386 } 407 }
387 408
@@ -391,39 +412,48 @@ static void lane2_assoc_ind(struct net_device *dev, const u8 *mac_addr,
391 } 412 }
392 mpoa_device_type = *tlvs++; 413 mpoa_device_type = *tlvs++;
393 number_of_mps_macs = *tlvs++; 414 number_of_mps_macs = *tlvs++;
394 dprintk("mpoa: (%s) MPOA device type '%s', ", dev->name, mpoa_device_type_string(mpoa_device_type)); 415 dprintk("(%s) MPOA device type '%s', ",
416 dev->name, mpoa_device_type_string(mpoa_device_type));
395 if (mpoa_device_type == MPS_AND_MPC && 417 if (mpoa_device_type == MPS_AND_MPC &&
396 length < (42 + number_of_mps_macs*ETH_ALEN)) { /* :) */ 418 length < (42 + number_of_mps_macs*ETH_ALEN)) { /* :) */
397 printk("\nmpoa: (%s) lane2_assoc_ind: short MPOA Device Type TLV\n", 419 pr_info("(%s) short MPOA Device Type TLV\n",
398 dev->name); 420 dev->name);
399 continue; 421 continue;
400 } 422 }
401 if ((mpoa_device_type == MPS || mpoa_device_type == MPC) 423 if ((mpoa_device_type == MPS || mpoa_device_type == MPC) &&
402 && length < 22 + number_of_mps_macs*ETH_ALEN) { 424 length < 22 + number_of_mps_macs*ETH_ALEN) {
403 printk("\nmpoa: (%s) lane2_assoc_ind: short MPOA Device Type TLV\n", 425 pr_info("(%s) short MPOA Device Type TLV\n", dev->name);
404 dev->name);
405 continue; 426 continue;
406 } 427 }
407 if (mpoa_device_type != MPS && mpoa_device_type != MPS_AND_MPC) { 428 if (mpoa_device_type != MPS &&
408 dprintk("ignoring non-MPS device\n"); 429 mpoa_device_type != MPS_AND_MPC) {
409 if (mpoa_device_type == MPC) tlvs += 20; 430 dprintk("ignoring non-MPS device ");
431 if (mpoa_device_type == MPC)
432 tlvs += 20;
410 continue; /* we are only interested in MPSs */ 433 continue; /* we are only interested in MPSs */
411 } 434 }
412 if (number_of_mps_macs == 0 && mpoa_device_type == MPS_AND_MPC) { 435 if (number_of_mps_macs == 0 &&
413 printk("\nmpoa: (%s) lane2_assoc_ind: MPS_AND_MPC has zero MACs\n", dev->name); 436 mpoa_device_type == MPS_AND_MPC) {
437 pr_info("(%s) MPS_AND_MPC has zero MACs\n", dev->name);
414 continue; /* someone should read the spec */ 438 continue; /* someone should read the spec */
415 } 439 }
416 dprintk("this MPS has %d MAC addresses\n", number_of_mps_macs); 440 dprintk_cont("this MPS has %d MAC addresses\n",
441 number_of_mps_macs);
417 442
418 /* ok, now we can go and tell our daemon the control address of MPS */ 443 /*
444 * ok, now we can go and tell our daemon
445 * the control address of MPS
446 */
419 send_set_mps_ctrl_addr(tlvs, mpc); 447 send_set_mps_ctrl_addr(tlvs, mpc);
420 448
421 tlvs = copy_macs(mpc, mac_addr, tlvs, number_of_mps_macs, mpoa_device_type); 449 tlvs = copy_macs(mpc, mac_addr, tlvs,
422 if (tlvs == NULL) return; 450 number_of_mps_macs, mpoa_device_type);
451 if (tlvs == NULL)
452 return;
423 } 453 }
424 if (end_of_tlvs - tlvs != 0) 454 if (end_of_tlvs - tlvs != 0)
425 printk("mpoa: (%s) lane2_assoc_ind: ignoring %Zd bytes of trailing TLV carbage\n", 455 pr_info("(%s) ignoring %Zd bytes of trailing TLV garbage\n",
426 dev->name, end_of_tlvs - tlvs); 456 dev->name, end_of_tlvs - tlvs);
427 return; 457 return;
428} 458}
429 459
@@ -441,11 +471,12 @@ static const uint8_t *copy_macs(struct mpoa_client *mpc,
441 num_macs = (mps_macs > 1) ? mps_macs : 1; 471 num_macs = (mps_macs > 1) ? mps_macs : 1;
442 472
443 if (mpc->number_of_mps_macs != num_macs) { /* need to reallocate? */ 473 if (mpc->number_of_mps_macs != num_macs) { /* need to reallocate? */
444 if (mpc->number_of_mps_macs != 0) kfree(mpc->mps_macs); 474 if (mpc->number_of_mps_macs != 0)
475 kfree(mpc->mps_macs);
445 mpc->number_of_mps_macs = 0; 476 mpc->number_of_mps_macs = 0;
446 mpc->mps_macs = kmalloc(num_macs*ETH_ALEN, GFP_KERNEL); 477 mpc->mps_macs = kmalloc(num_macs * ETH_ALEN, GFP_KERNEL);
447 if (mpc->mps_macs == NULL) { 478 if (mpc->mps_macs == NULL) {
448 printk("mpoa: (%s) copy_macs: out of mem\n", mpc->dev->name); 479 pr_info("(%s) out of mem\n", mpc->dev->name);
449 return NULL; 480 return NULL;
450 } 481 }
451 } 482 }
@@ -478,24 +509,30 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
478 iph = (struct iphdr *)buff; 509 iph = (struct iphdr *)buff;
479 ipaddr = iph->daddr; 510 ipaddr = iph->daddr;
480 511
481 ddprintk("mpoa: (%s) send_via_shortcut: ipaddr 0x%x\n", mpc->dev->name, ipaddr); 512 ddprintk("(%s) ipaddr 0x%x\n",
513 mpc->dev->name, ipaddr);
482 514
483 entry = mpc->in_ops->get(ipaddr, mpc); 515 entry = mpc->in_ops->get(ipaddr, mpc);
484 if (entry == NULL) { 516 if (entry == NULL) {
485 entry = mpc->in_ops->add_entry(ipaddr, mpc); 517 entry = mpc->in_ops->add_entry(ipaddr, mpc);
486 if (entry != NULL) mpc->in_ops->put(entry); 518 if (entry != NULL)
519 mpc->in_ops->put(entry);
487 return 1; 520 return 1;
488 } 521 }
489 if (mpc->in_ops->cache_hit(entry, mpc) != OPEN){ /* threshold not exceeded or VCC not ready */ 522 /* threshold not exceeded or VCC not ready */
490 ddprintk("mpoa: (%s) send_via_shortcut: cache_hit: returns != OPEN\n", mpc->dev->name); 523 if (mpc->in_ops->cache_hit(entry, mpc) != OPEN) {
524 ddprintk("(%s) cache_hit: returns != OPEN\n",
525 mpc->dev->name);
491 mpc->in_ops->put(entry); 526 mpc->in_ops->put(entry);
492 return 1; 527 return 1;
493 } 528 }
494 529
495 ddprintk("mpoa: (%s) send_via_shortcut: using shortcut\n", mpc->dev->name); 530 ddprintk("(%s) using shortcut\n",
531 mpc->dev->name);
496 /* MPOA spec A.1.4, MPOA client must decrement IP ttl at least by one */ 532 /* MPOA spec A.1.4, MPOA client must decrement IP ttl at least by one */
497 if (iph->ttl <= 1) { 533 if (iph->ttl <= 1) {
498 ddprintk("mpoa: (%s) send_via_shortcut: IP ttl = %u, using LANE\n", mpc->dev->name, iph->ttl); 534 ddprintk("(%s) IP ttl = %u, using LANE\n",
535 mpc->dev->name, iph->ttl);
499 mpc->in_ops->put(entry); 536 mpc->in_ops->put(entry);
500 return 1; 537 return 1;
501 } 538 }
@@ -504,15 +541,18 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
504 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); 541 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
505 542
506 if (entry->ctrl_info.tag != 0) { 543 if (entry->ctrl_info.tag != 0) {
507 ddprintk("mpoa: (%s) send_via_shortcut: adding tag 0x%x\n", mpc->dev->name, entry->ctrl_info.tag); 544 ddprintk("(%s) adding tag 0x%x\n",
545 mpc->dev->name, entry->ctrl_info.tag);
508 tagged_llc_snap_hdr.tag = entry->ctrl_info.tag; 546 tagged_llc_snap_hdr.tag = entry->ctrl_info.tag;
509 skb_pull(skb, ETH_HLEN); /* get rid of Eth header */ 547 skb_pull(skb, ETH_HLEN); /* get rid of Eth header */
510 skb_push(skb, sizeof(tagged_llc_snap_hdr)); /* add LLC/SNAP header */ 548 skb_push(skb, sizeof(tagged_llc_snap_hdr));
549 /* add LLC/SNAP header */
511 skb_copy_to_linear_data(skb, &tagged_llc_snap_hdr, 550 skb_copy_to_linear_data(skb, &tagged_llc_snap_hdr,
512 sizeof(tagged_llc_snap_hdr)); 551 sizeof(tagged_llc_snap_hdr));
513 } else { 552 } else {
514 skb_pull(skb, ETH_HLEN); /* get rid of Eth header */ 553 skb_pull(skb, ETH_HLEN); /* get rid of Eth header */
515 skb_push(skb, sizeof(struct llc_snap_hdr)); /* add LLC/SNAP header + tag */ 554 skb_push(skb, sizeof(struct llc_snap_hdr));
555 /* add LLC/SNAP header + tag */
516 skb_copy_to_linear_data(skb, &llc_snap_mpoa_data, 556 skb_copy_to_linear_data(skb, &llc_snap_mpoa_data,
517 sizeof(struct llc_snap_hdr)); 557 sizeof(struct llc_snap_hdr));
518 } 558 }
@@ -537,8 +577,8 @@ static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
537 int i = 0; 577 int i = 0;
538 578
539 mpc = find_mpc_by_lec(dev); /* this should NEVER fail */ 579 mpc = find_mpc_by_lec(dev); /* this should NEVER fail */
540 if(mpc == NULL) { 580 if (mpc == NULL) {
541 printk("mpoa: (%s) mpc_send_packet: no MPC found\n", dev->name); 581 pr_info("(%s) no MPC found\n", dev->name);
542 goto non_ip; 582 goto non_ip;
543 } 583 }
544 584
@@ -554,14 +594,15 @@ static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
554 goto non_ip; 594 goto non_ip;
555 595
556 while (i < mpc->number_of_mps_macs) { 596 while (i < mpc->number_of_mps_macs) {
557 if (!compare_ether_addr(eth->h_dest, (mpc->mps_macs + i*ETH_ALEN))) 597 if (!compare_ether_addr(eth->h_dest,
558 if ( send_via_shortcut(skb, mpc) == 0 ) /* try shortcut */ 598 (mpc->mps_macs + i*ETH_ALEN)))
559 return NETDEV_TX_OK; /* success! */ 599 if (send_via_shortcut(skb, mpc) == 0) /* try shortcut */
600 return NETDEV_TX_OK;
560 i++; 601 i++;
561 } 602 }
562 603
563 non_ip: 604non_ip:
564 return mpc->old_ops->ndo_start_xmit(skb,dev); 605 return mpc->old_ops->ndo_start_xmit(skb, dev);
565} 606}
566 607
567static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg) 608static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
@@ -574,7 +615,8 @@ static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
574 615
575 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmmpc_ioc)); 616 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmmpc_ioc));
576 if (bytes_left != 0) { 617 if (bytes_left != 0) {
577 printk("mpoa: mpc_vcc_attach: Short read (missed %d bytes) from userland\n", bytes_left); 618 pr_info("mpoa:Short read (missed %d bytes) from userland\n",
619 bytes_left);
578 return -EFAULT; 620 return -EFAULT;
579 } 621 }
580 ipaddr = ioc_data.ipaddr; 622 ipaddr = ioc_data.ipaddr;
@@ -587,18 +629,20 @@ static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
587 629
588 if (ioc_data.type == MPC_SOCKET_INGRESS) { 630 if (ioc_data.type == MPC_SOCKET_INGRESS) {
589 in_entry = mpc->in_ops->get(ipaddr, mpc); 631 in_entry = mpc->in_ops->get(ipaddr, mpc);
590 if (in_entry == NULL || in_entry->entry_state < INGRESS_RESOLVED) { 632 if (in_entry == NULL ||
591 printk("mpoa: (%s) mpc_vcc_attach: did not find RESOLVED entry from ingress cache\n", 633 in_entry->entry_state < INGRESS_RESOLVED) {
634 pr_info("(%s) did not find RESOLVED entry from ingress cache\n",
592 mpc->dev->name); 635 mpc->dev->name);
593 if (in_entry != NULL) mpc->in_ops->put(in_entry); 636 if (in_entry != NULL)
637 mpc->in_ops->put(in_entry);
594 return -EINVAL; 638 return -EINVAL;
595 } 639 }
596 printk("mpoa: (%s) mpc_vcc_attach: attaching ingress SVC, entry = %pI4\n", 640 pr_info("(%s) attaching ingress SVC, entry = %pI4\n",
597 mpc->dev->name, &in_entry->ctrl_info.in_dst_ip); 641 mpc->dev->name, &in_entry->ctrl_info.in_dst_ip);
598 in_entry->shortcut = vcc; 642 in_entry->shortcut = vcc;
599 mpc->in_ops->put(in_entry); 643 mpc->in_ops->put(in_entry);
600 } else { 644 } else {
601 printk("mpoa: (%s) mpc_vcc_attach: attaching egress SVC\n", mpc->dev->name); 645 pr_info("(%s) attaching egress SVC\n", mpc->dev->name);
602 } 646 }
603 647
604 vcc->proto_data = mpc->dev; 648 vcc->proto_data = mpc->dev;
@@ -618,27 +662,27 @@ static void mpc_vcc_close(struct atm_vcc *vcc, struct net_device *dev)
618 662
619 mpc = find_mpc_by_lec(dev); 663 mpc = find_mpc_by_lec(dev);
620 if (mpc == NULL) { 664 if (mpc == NULL) {
621 printk("mpoa: (%s) mpc_vcc_close: close for unknown MPC\n", dev->name); 665 pr_info("(%s) close for unknown MPC\n", dev->name);
622 return; 666 return;
623 } 667 }
624 668
625 dprintk("mpoa: (%s) mpc_vcc_close:\n", dev->name); 669 dprintk("(%s)\n", dev->name);
626 in_entry = mpc->in_ops->get_by_vcc(vcc, mpc); 670 in_entry = mpc->in_ops->get_by_vcc(vcc, mpc);
627 if (in_entry) { 671 if (in_entry) {
628 dprintk("mpoa: (%s) mpc_vcc_close: ingress SVC closed ip = %pI4\n", 672 dprintk("(%s) ingress SVC closed ip = %pI4\n",
629 mpc->dev->name, &in_entry->ctrl_info.in_dst_ip); 673 mpc->dev->name, &in_entry->ctrl_info.in_dst_ip);
630 in_entry->shortcut = NULL; 674 in_entry->shortcut = NULL;
631 mpc->in_ops->put(in_entry); 675 mpc->in_ops->put(in_entry);
632 } 676 }
633 eg_entry = mpc->eg_ops->get_by_vcc(vcc, mpc); 677 eg_entry = mpc->eg_ops->get_by_vcc(vcc, mpc);
634 if (eg_entry) { 678 if (eg_entry) {
635 dprintk("mpoa: (%s) mpc_vcc_close: egress SVC closed\n", mpc->dev->name); 679 dprintk("(%s) egress SVC closed\n", mpc->dev->name);
636 eg_entry->shortcut = NULL; 680 eg_entry->shortcut = NULL;
637 mpc->eg_ops->put(eg_entry); 681 mpc->eg_ops->put(eg_entry);
638 } 682 }
639 683
640 if (in_entry == NULL && eg_entry == NULL) 684 if (in_entry == NULL && eg_entry == NULL)
641 dprintk("mpoa: (%s) mpc_vcc_close: unused vcc closed\n", dev->name); 685 dprintk("(%s) unused vcc closed\n", dev->name);
642 686
643 return; 687 return;
644} 688}
@@ -652,18 +696,19 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
652 __be32 tag; 696 __be32 tag;
653 char *tmp; 697 char *tmp;
654 698
655 ddprintk("mpoa: (%s) mpc_push:\n", dev->name); 699 ddprintk("(%s)\n", dev->name);
656 if (skb == NULL) { 700 if (skb == NULL) {
657 dprintk("mpoa: (%s) mpc_push: null skb, closing VCC\n", dev->name); 701 dprintk("(%s) null skb, closing VCC\n", dev->name);
658 mpc_vcc_close(vcc, dev); 702 mpc_vcc_close(vcc, dev);
659 return; 703 return;
660 } 704 }
661 705
662 skb->dev = dev; 706 skb->dev = dev;
663 if (memcmp(skb->data, &llc_snap_mpoa_ctrl, sizeof(struct llc_snap_hdr)) == 0) { 707 if (memcmp(skb->data, &llc_snap_mpoa_ctrl,
708 sizeof(struct llc_snap_hdr)) == 0) {
664 struct sock *sk = sk_atm(vcc); 709 struct sock *sk = sk_atm(vcc);
665 710
666 dprintk("mpoa: (%s) mpc_push: control packet arrived\n", dev->name); 711 dprintk("(%s) control packet arrived\n", dev->name);
667 /* Pass control packets to daemon */ 712 /* Pass control packets to daemon */
668 skb_queue_tail(&sk->sk_receive_queue, skb); 713 skb_queue_tail(&sk->sk_receive_queue, skb);
669 sk->sk_data_ready(sk, skb->len); 714 sk->sk_data_ready(sk, skb->len);
@@ -675,20 +720,22 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
675 720
676 mpc = find_mpc_by_lec(dev); 721 mpc = find_mpc_by_lec(dev);
677 if (mpc == NULL) { 722 if (mpc == NULL) {
678 printk("mpoa: (%s) mpc_push: unknown MPC\n", dev->name); 723 pr_info("(%s) unknown MPC\n", dev->name);
679 return; 724 return;
680 } 725 }
681 726
682 if (memcmp(skb->data, &llc_snap_mpoa_data_tagged, sizeof(struct llc_snap_hdr)) == 0) { /* MPOA tagged data */ 727 if (memcmp(skb->data, &llc_snap_mpoa_data_tagged,
683 ddprintk("mpoa: (%s) mpc_push: tagged data packet arrived\n", dev->name); 728 sizeof(struct llc_snap_hdr)) == 0) { /* MPOA tagged data */
729 ddprintk("(%s) tagged data packet arrived\n", dev->name);
684 730
685 } else if (memcmp(skb->data, &llc_snap_mpoa_data, sizeof(struct llc_snap_hdr)) == 0) { /* MPOA data */ 731 } else if (memcmp(skb->data, &llc_snap_mpoa_data,
686 printk("mpoa: (%s) mpc_push: non-tagged data packet arrived\n", dev->name); 732 sizeof(struct llc_snap_hdr)) == 0) { /* MPOA data */
687 printk(" mpc_push: non-tagged data unsupported, purging\n"); 733 pr_info("(%s) Unsupported non-tagged data packet arrived. Purging\n",
734 dev->name);
688 dev_kfree_skb_any(skb); 735 dev_kfree_skb_any(skb);
689 return; 736 return;
690 } else { 737 } else {
691 printk("mpoa: (%s) mpc_push: garbage arrived, purging\n", dev->name); 738 pr_info("(%s) garbage arrived, purging\n", dev->name);
692 dev_kfree_skb_any(skb); 739 dev_kfree_skb_any(skb);
693 return; 740 return;
694 } 741 }
@@ -698,8 +745,8 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
698 745
699 eg = mpc->eg_ops->get_by_tag(tag, mpc); 746 eg = mpc->eg_ops->get_by_tag(tag, mpc);
700 if (eg == NULL) { 747 if (eg == NULL) {
701 printk("mpoa: (%s) mpc_push: Didn't find egress cache entry, tag = %u\n", 748 pr_info("mpoa: (%s) Didn't find egress cache entry, tag = %u\n",
702 dev->name,tag); 749 dev->name, tag);
703 purge_egress_shortcut(vcc, NULL); 750 purge_egress_shortcut(vcc, NULL);
704 dev_kfree_skb_any(skb); 751 dev_kfree_skb_any(skb);
705 return; 752 return;
@@ -711,13 +758,15 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
711 */ 758 */
712 if (eg->shortcut == NULL) { 759 if (eg->shortcut == NULL) {
713 eg->shortcut = vcc; 760 eg->shortcut = vcc;
714 printk("mpoa: (%s) mpc_push: egress SVC in use\n", dev->name); 761 pr_info("(%s) egress SVC in use\n", dev->name);
715 } 762 }
716 763
717 skb_pull(skb, sizeof(struct llc_snap_hdr) + sizeof(tag)); /* get rid of LLC/SNAP header */ 764 skb_pull(skb, sizeof(struct llc_snap_hdr) + sizeof(tag));
718 new_skb = skb_realloc_headroom(skb, eg->ctrl_info.DH_length); /* LLC/SNAP is shorter than MAC header :( */ 765 /* get rid of LLC/SNAP header */
766 new_skb = skb_realloc_headroom(skb, eg->ctrl_info.DH_length);
767 /* LLC/SNAP is shorter than MAC header :( */
719 dev_kfree_skb_any(skb); 768 dev_kfree_skb_any(skb);
720 if (new_skb == NULL){ 769 if (new_skb == NULL) {
721 mpc->eg_ops->put(eg); 770 mpc->eg_ops->put(eg);
722 return; 771 return;
723 } 772 }
@@ -750,7 +799,7 @@ static struct atm_dev mpc_dev = {
750 /* members not explicitly initialised will be 0 */ 799 /* members not explicitly initialised will be 0 */
751}; 800};
752 801
753static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg) 802static int atm_mpoa_mpoad_attach(struct atm_vcc *vcc, int arg)
754{ 803{
755 struct mpoa_client *mpc; 804 struct mpoa_client *mpc;
756 struct lec_priv *priv; 805 struct lec_priv *priv;
@@ -770,15 +819,16 @@ static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg)
770 819
771 mpc = find_mpc_by_itfnum(arg); 820 mpc = find_mpc_by_itfnum(arg);
772 if (mpc == NULL) { 821 if (mpc == NULL) {
773 dprintk("mpoa: mpoad_attach: allocating new mpc for itf %d\n", arg); 822 dprintk("allocating new mpc for itf %d\n", arg);
774 mpc = alloc_mpc(); 823 mpc = alloc_mpc();
775 if (mpc == NULL) 824 if (mpc == NULL)
776 return -ENOMEM; 825 return -ENOMEM;
777 mpc->dev_num = arg; 826 mpc->dev_num = arg;
778 mpc->dev = find_lec_by_itfnum(arg); /* NULL if there was no lec */ 827 mpc->dev = find_lec_by_itfnum(arg);
828 /* NULL if there was no lec */
779 } 829 }
780 if (mpc->mpoad_vcc) { 830 if (mpc->mpoad_vcc) {
781 printk("mpoa: mpoad_attach: mpoad is already present for itf %d\n", arg); 831 pr_info("mpoad is already present for itf %d\n", arg);
782 return -EADDRINUSE; 832 return -EADDRINUSE;
783 } 833 }
784 834
@@ -794,8 +844,8 @@ static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg)
794 mpc->mpoad_vcc = vcc; 844 mpc->mpoad_vcc = vcc;
795 vcc->dev = &mpc_dev; 845 vcc->dev = &mpc_dev;
796 vcc_insert_socket(sk_atm(vcc)); 846 vcc_insert_socket(sk_atm(vcc));
797 set_bit(ATM_VF_META,&vcc->flags); 847 set_bit(ATM_VF_META, &vcc->flags);
798 set_bit(ATM_VF_READY,&vcc->flags); 848 set_bit(ATM_VF_READY, &vcc->flags);
799 849
800 if (mpc->dev) { 850 if (mpc->dev) {
801 char empty[ATM_ESA_LEN]; 851 char empty[ATM_ESA_LEN];
@@ -805,7 +855,7 @@ static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg)
805 /* set address if mpcd e.g. gets killed and restarted. 855 /* set address if mpcd e.g. gets killed and restarted.
806 * If we do not do it now we have to wait for the next LE_ARP 856 * If we do not do it now we have to wait for the next LE_ARP
807 */ 857 */
808 if ( memcmp(mpc->mps_ctrl_addr, empty, ATM_ESA_LEN) != 0 ) 858 if (memcmp(mpc->mps_ctrl_addr, empty, ATM_ESA_LEN) != 0)
809 send_set_mps_ctrl_addr(mpc->mps_ctrl_addr, mpc); 859 send_set_mps_ctrl_addr(mpc->mps_ctrl_addr, mpc);
810 } 860 }
811 861
@@ -817,7 +867,7 @@ static void send_set_mps_ctrl_addr(const char *addr, struct mpoa_client *mpc)
817{ 867{
818 struct k_message mesg; 868 struct k_message mesg;
819 869
820 memcpy (mpc->mps_ctrl_addr, addr, ATM_ESA_LEN); 870 memcpy(mpc->mps_ctrl_addr, addr, ATM_ESA_LEN);
821 871
822 mesg.type = SET_MPS_CTRL_ADDR; 872 mesg.type = SET_MPS_CTRL_ADDR;
823 memcpy(mesg.MPS_ctrl, addr, ATM_ESA_LEN); 873 memcpy(mesg.MPS_ctrl, addr, ATM_ESA_LEN);
@@ -833,11 +883,11 @@ static void mpoad_close(struct atm_vcc *vcc)
833 883
834 mpc = find_mpc_by_vcc(vcc); 884 mpc = find_mpc_by_vcc(vcc);
835 if (mpc == NULL) { 885 if (mpc == NULL) {
836 printk("mpoa: mpoad_close: did not find MPC\n"); 886 pr_info("did not find MPC\n");
837 return; 887 return;
838 } 888 }
839 if (!mpc->mpoad_vcc) { 889 if (!mpc->mpoad_vcc) {
840 printk("mpoa: mpoad_close: close for non-present mpoad\n"); 890 pr_info("close for non-present mpoad\n");
841 return; 891 return;
842 } 892 }
843 893
@@ -857,7 +907,7 @@ static void mpoad_close(struct atm_vcc *vcc)
857 kfree_skb(skb); 907 kfree_skb(skb);
858 } 908 }
859 909
860 printk("mpoa: (%s) going down\n", 910 pr_info("(%s) going down\n",
861 (mpc->dev) ? mpc->dev->name : "<unknown>"); 911 (mpc->dev) ? mpc->dev->name : "<unknown>");
862 module_put(THIS_MODULE); 912 module_put(THIS_MODULE);
863 913
@@ -871,61 +921,61 @@ static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb)
871{ 921{
872 922
873 struct mpoa_client *mpc = find_mpc_by_vcc(vcc); 923 struct mpoa_client *mpc = find_mpc_by_vcc(vcc);
874 struct k_message *mesg = (struct k_message*)skb->data; 924 struct k_message *mesg = (struct k_message *)skb->data;
875 atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); 925 atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
876 926
877 if (mpc == NULL) { 927 if (mpc == NULL) {
878 printk("mpoa: msg_from_mpoad: no mpc found\n"); 928 pr_info("no mpc found\n");
879 return 0; 929 return 0;
880 } 930 }
881 dprintk("mpoa: (%s) msg_from_mpoad:", (mpc->dev) ? mpc->dev->name : "<unknown>"); 931 dprintk("(%s)", mpc->dev ? mpc->dev->name : "<unknown>");
882 switch(mesg->type) { 932 switch (mesg->type) {
883 case MPOA_RES_REPLY_RCVD: 933 case MPOA_RES_REPLY_RCVD:
884 dprintk(" mpoa_res_reply_rcvd\n"); 934 dprintk_cont("mpoa_res_reply_rcvd\n");
885 MPOA_res_reply_rcvd(mesg, mpc); 935 MPOA_res_reply_rcvd(mesg, mpc);
886 break; 936 break;
887 case MPOA_TRIGGER_RCVD: 937 case MPOA_TRIGGER_RCVD:
888 dprintk(" mpoa_trigger_rcvd\n"); 938 dprintk_cont("mpoa_trigger_rcvd\n");
889 MPOA_trigger_rcvd(mesg, mpc); 939 MPOA_trigger_rcvd(mesg, mpc);
890 break; 940 break;
891 case INGRESS_PURGE_RCVD: 941 case INGRESS_PURGE_RCVD:
892 dprintk(" nhrp_purge_rcvd\n"); 942 dprintk_cont("nhrp_purge_rcvd\n");
893 ingress_purge_rcvd(mesg, mpc); 943 ingress_purge_rcvd(mesg, mpc);
894 break; 944 break;
895 case EGRESS_PURGE_RCVD: 945 case EGRESS_PURGE_RCVD:
896 dprintk(" egress_purge_reply_rcvd\n"); 946 dprintk_cont("egress_purge_reply_rcvd\n");
897 egress_purge_rcvd(mesg, mpc); 947 egress_purge_rcvd(mesg, mpc);
898 break; 948 break;
899 case MPS_DEATH: 949 case MPS_DEATH:
900 dprintk(" mps_death\n"); 950 dprintk_cont("mps_death\n");
901 mps_death(mesg, mpc); 951 mps_death(mesg, mpc);
902 break; 952 break;
903 case CACHE_IMPOS_RCVD: 953 case CACHE_IMPOS_RCVD:
904 dprintk(" cache_impos_rcvd\n"); 954 dprintk_cont("cache_impos_rcvd\n");
905 MPOA_cache_impos_rcvd(mesg, mpc); 955 MPOA_cache_impos_rcvd(mesg, mpc);
906 break; 956 break;
907 case SET_MPC_CTRL_ADDR: 957 case SET_MPC_CTRL_ADDR:
908 dprintk(" set_mpc_ctrl_addr\n"); 958 dprintk_cont("set_mpc_ctrl_addr\n");
909 set_mpc_ctrl_addr_rcvd(mesg, mpc); 959 set_mpc_ctrl_addr_rcvd(mesg, mpc);
910 break; 960 break;
911 case SET_MPS_MAC_ADDR: 961 case SET_MPS_MAC_ADDR:
912 dprintk(" set_mps_mac_addr\n"); 962 dprintk_cont("set_mps_mac_addr\n");
913 set_mps_mac_addr_rcvd(mesg, mpc); 963 set_mps_mac_addr_rcvd(mesg, mpc);
914 break; 964 break;
915 case CLEAN_UP_AND_EXIT: 965 case CLEAN_UP_AND_EXIT:
916 dprintk(" clean_up_and_exit\n"); 966 dprintk_cont("clean_up_and_exit\n");
917 clean_up(mesg, mpc, DIE); 967 clean_up(mesg, mpc, DIE);
918 break; 968 break;
919 case RELOAD: 969 case RELOAD:
920 dprintk(" reload\n"); 970 dprintk_cont("reload\n");
921 clean_up(mesg, mpc, RELOAD); 971 clean_up(mesg, mpc, RELOAD);
922 break; 972 break;
923 case SET_MPC_PARAMS: 973 case SET_MPC_PARAMS:
924 dprintk(" set_mpc_params\n"); 974 dprintk_cont("set_mpc_params\n");
925 mpc->parameters = mesg->content.params; 975 mpc->parameters = mesg->content.params;
926 break; 976 break;
927 default: 977 default:
928 dprintk(" unknown message %d\n", mesg->type); 978 dprintk_cont("unknown message %d\n", mesg->type);
929 break; 979 break;
930 } 980 }
931 kfree_skb(skb); 981 kfree_skb(skb);
@@ -940,7 +990,7 @@ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc)
940 struct sock *sk; 990 struct sock *sk;
941 991
942 if (mpc == NULL || !mpc->mpoad_vcc) { 992 if (mpc == NULL || !mpc->mpoad_vcc) {
943 printk("mpoa: msg_to_mpoad: mesg %d to a non-existent mpoad\n", mesg->type); 993 pr_info("mesg %d to a non-existent mpoad\n", mesg->type);
944 return -ENXIO; 994 return -ENXIO;
945 } 995 }
946 996
@@ -958,7 +1008,8 @@ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc)
958 return 0; 1008 return 0;
959} 1009}
960 1010
961static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned long event, void *dev_ptr) 1011static int mpoa_event_listener(struct notifier_block *mpoa_notifier,
1012 unsigned long event, void *dev_ptr)
962{ 1013{
963 struct net_device *dev; 1014 struct net_device *dev;
964 struct mpoa_client *mpc; 1015 struct mpoa_client *mpc;
@@ -980,25 +1031,24 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned lo
980 priv->lane2_ops->associate_indicator = lane2_assoc_ind; 1031 priv->lane2_ops->associate_indicator = lane2_assoc_ind;
981 mpc = find_mpc_by_itfnum(priv->itfnum); 1032 mpc = find_mpc_by_itfnum(priv->itfnum);
982 if (mpc == NULL) { 1033 if (mpc == NULL) {
983 dprintk("mpoa: mpoa_event_listener: allocating new mpc for %s\n", 1034 dprintk("allocating new mpc for %s\n", dev->name);
984 dev->name);
985 mpc = alloc_mpc(); 1035 mpc = alloc_mpc();
986 if (mpc == NULL) { 1036 if (mpc == NULL) {
987 printk("mpoa: mpoa_event_listener: no new mpc"); 1037 pr_info("no new mpc");
988 break; 1038 break;
989 } 1039 }
990 } 1040 }
991 mpc->dev_num = priv->itfnum; 1041 mpc->dev_num = priv->itfnum;
992 mpc->dev = dev; 1042 mpc->dev = dev;
993 dev_hold(dev); 1043 dev_hold(dev);
994 dprintk("mpoa: (%s) was initialized\n", dev->name); 1044 dprintk("(%s) was initialized\n", dev->name);
995 break; 1045 break;
996 case NETDEV_UNREGISTER: 1046 case NETDEV_UNREGISTER:
997 /* the lec device was deallocated */ 1047 /* the lec device was deallocated */
998 mpc = find_mpc_by_lec(dev); 1048 mpc = find_mpc_by_lec(dev);
999 if (mpc == NULL) 1049 if (mpc == NULL)
1000 break; 1050 break;
1001 dprintk("mpoa: device (%s) was deallocated\n", dev->name); 1051 dprintk("device (%s) was deallocated\n", dev->name);
1002 stop_mpc(mpc); 1052 stop_mpc(mpc);
1003 dev_put(mpc->dev); 1053 dev_put(mpc->dev);
1004 mpc->dev = NULL; 1054 mpc->dev = NULL;
@@ -1008,9 +1058,8 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned lo
1008 mpc = find_mpc_by_lec(dev); 1058 mpc = find_mpc_by_lec(dev);
1009 if (mpc == NULL) 1059 if (mpc == NULL)
1010 break; 1060 break;
1011 if (mpc->mpoad_vcc != NULL) { 1061 if (mpc->mpoad_vcc != NULL)
1012 start_mpc(mpc, dev); 1062 start_mpc(mpc, dev);
1013 }
1014 break; 1063 break;
1015 case NETDEV_DOWN: 1064 case NETDEV_DOWN:
1016 /* the dev was ifconfig'ed down */ 1065 /* the dev was ifconfig'ed down */
@@ -1020,9 +1069,8 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned lo
1020 mpc = find_mpc_by_lec(dev); 1069 mpc = find_mpc_by_lec(dev);
1021 if (mpc == NULL) 1070 if (mpc == NULL)
1022 break; 1071 break;
1023 if (mpc->mpoad_vcc != NULL) { 1072 if (mpc->mpoad_vcc != NULL)
1024 stop_mpc(mpc); 1073 stop_mpc(mpc);
1025 }
1026 break; 1074 break;
1027 case NETDEV_REBOOT: 1075 case NETDEV_REBOOT:
1028 case NETDEV_CHANGE: 1076 case NETDEV_CHANGE:
@@ -1049,7 +1097,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1049 in_cache_entry *entry; 1097 in_cache_entry *entry;
1050 1098
1051 entry = mpc->in_ops->get(dst_ip, mpc); 1099 entry = mpc->in_ops->get(dst_ip, mpc);
1052 if(entry == NULL){ 1100 if (entry == NULL) {
1053 entry = mpc->in_ops->add_entry(dst_ip, mpc); 1101 entry = mpc->in_ops->add_entry(dst_ip, mpc);
1054 entry->entry_state = INGRESS_RESOLVING; 1102 entry->entry_state = INGRESS_RESOLVING;
1055 msg->type = SND_MPOA_RES_RQST; 1103 msg->type = SND_MPOA_RES_RQST;
@@ -1060,7 +1108,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1060 return; 1108 return;
1061 } 1109 }
1062 1110
1063 if(entry->entry_state == INGRESS_INVALID){ 1111 if (entry->entry_state == INGRESS_INVALID) {
1064 entry->entry_state = INGRESS_RESOLVING; 1112 entry->entry_state = INGRESS_RESOLVING;
1065 msg->type = SND_MPOA_RES_RQST; 1113 msg->type = SND_MPOA_RES_RQST;
1066 msg->content.in_info = entry->ctrl_info; 1114 msg->content.in_info = entry->ctrl_info;
@@ -1070,7 +1118,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1070 return; 1118 return;
1071 } 1119 }
1072 1120
1073 printk("mpoa: (%s) MPOA_trigger_rcvd: entry already in resolving state\n", 1121 pr_info("(%s) entry already in resolving state\n",
1074 (mpc->dev) ? mpc->dev->name : "<unknown>"); 1122 (mpc->dev) ? mpc->dev->name : "<unknown>");
1075 mpc->in_ops->put(entry); 1123 mpc->in_ops->put(entry);
1076 return; 1124 return;
@@ -1080,23 +1128,25 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1080 * Things get complicated because we have to check if there's an egress 1128 * Things get complicated because we have to check if there's an egress
1081 * shortcut with suitable traffic parameters we could use. 1129 * shortcut with suitable traffic parameters we could use.
1082 */ 1130 */
1083static void check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_client *client, in_cache_entry *entry) 1131static void check_qos_and_open_shortcut(struct k_message *msg,
1132 struct mpoa_client *client,
1133 in_cache_entry *entry)
1084{ 1134{
1085 __be32 dst_ip = msg->content.in_info.in_dst_ip; 1135 __be32 dst_ip = msg->content.in_info.in_dst_ip;
1086 struct atm_mpoa_qos *qos = atm_mpoa_search_qos(dst_ip); 1136 struct atm_mpoa_qos *qos = atm_mpoa_search_qos(dst_ip);
1087 eg_cache_entry *eg_entry = client->eg_ops->get_by_src_ip(dst_ip, client); 1137 eg_cache_entry *eg_entry = client->eg_ops->get_by_src_ip(dst_ip, client);
1088 1138
1089 if(eg_entry && eg_entry->shortcut){ 1139 if (eg_entry && eg_entry->shortcut) {
1090 if(eg_entry->shortcut->qos.txtp.traffic_class & 1140 if (eg_entry->shortcut->qos.txtp.traffic_class &
1091 msg->qos.txtp.traffic_class & 1141 msg->qos.txtp.traffic_class &
1092 (qos ? qos->qos.txtp.traffic_class : ATM_UBR | ATM_CBR)){ 1142 (qos ? qos->qos.txtp.traffic_class : ATM_UBR | ATM_CBR)) {
1093 if(eg_entry->shortcut->qos.txtp.traffic_class == ATM_UBR) 1143 if (eg_entry->shortcut->qos.txtp.traffic_class == ATM_UBR)
1094 entry->shortcut = eg_entry->shortcut; 1144 entry->shortcut = eg_entry->shortcut;
1095 else if(eg_entry->shortcut->qos.txtp.max_pcr > 0) 1145 else if (eg_entry->shortcut->qos.txtp.max_pcr > 0)
1096 entry->shortcut = eg_entry->shortcut; 1146 entry->shortcut = eg_entry->shortcut;
1097 } 1147 }
1098 if(entry->shortcut){ 1148 if (entry->shortcut) {
1099 dprintk("mpoa: (%s) using egress SVC to reach %pI4\n", 1149 dprintk("(%s) using egress SVC to reach %pI4\n",
1100 client->dev->name, &dst_ip); 1150 client->dev->name, &dst_ip);
1101 client->eg_ops->put(eg_entry); 1151 client->eg_ops->put(eg_entry);
1102 return; 1152 return;
@@ -1107,12 +1157,13 @@ static void check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_clien
1107 1157
1108 /* No luck in the egress cache we must open an ingress SVC */ 1158 /* No luck in the egress cache we must open an ingress SVC */
1109 msg->type = OPEN_INGRESS_SVC; 1159 msg->type = OPEN_INGRESS_SVC;
1110 if (qos && (qos->qos.txtp.traffic_class == msg->qos.txtp.traffic_class)) 1160 if (qos &&
1111 { 1161 (qos->qos.txtp.traffic_class == msg->qos.txtp.traffic_class)) {
1112 msg->qos = qos->qos; 1162 msg->qos = qos->qos;
1113 printk("mpoa: (%s) trying to get a CBR shortcut\n",client->dev->name); 1163 pr_info("(%s) trying to get a CBR shortcut\n",
1114 } 1164 client->dev->name);
1115 else memset(&msg->qos,0,sizeof(struct atm_qos)); 1165 } else
1166 memset(&msg->qos, 0, sizeof(struct atm_qos));
1116 msg_to_mpoad(msg, client); 1167 msg_to_mpoad(msg, client);
1117 return; 1168 return;
1118} 1169}
@@ -1122,17 +1173,19 @@ static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1122 __be32 dst_ip = msg->content.in_info.in_dst_ip; 1173 __be32 dst_ip = msg->content.in_info.in_dst_ip;
1123 in_cache_entry *entry = mpc->in_ops->get(dst_ip, mpc); 1174 in_cache_entry *entry = mpc->in_ops->get(dst_ip, mpc);
1124 1175
1125 dprintk("mpoa: (%s) MPOA_res_reply_rcvd: ip %pI4\n", 1176 dprintk("(%s) ip %pI4\n",
1126 mpc->dev->name, &dst_ip); 1177 mpc->dev->name, &dst_ip);
1127 ddprintk("mpoa: (%s) MPOA_res_reply_rcvd() entry = %p", mpc->dev->name, entry); 1178 ddprintk("(%s) entry = %p",
1128 if(entry == NULL){ 1179 mpc->dev->name, entry);
1129 printk("\nmpoa: (%s) ARGH, received res. reply for an entry that doesn't exist.\n", mpc->dev->name); 1180 if (entry == NULL) {
1181 pr_info("(%s) ARGH, received res. reply for an entry that doesn't exist.\n",
1182 mpc->dev->name);
1130 return; 1183 return;
1131 } 1184 }
1132 ddprintk(" entry_state = %d ", entry->entry_state); 1185 ddprintk_cont(" entry_state = %d ", entry->entry_state);
1133 1186
1134 if (entry->entry_state == INGRESS_RESOLVED) { 1187 if (entry->entry_state == INGRESS_RESOLVED) {
1135 printk("\nmpoa: (%s) MPOA_res_reply_rcvd for RESOLVED entry!\n", mpc->dev->name); 1188 pr_info("(%s) RESOLVED entry!\n", mpc->dev->name);
1136 mpc->in_ops->put(entry); 1189 mpc->in_ops->put(entry);
1137 return; 1190 return;
1138 } 1191 }
@@ -1141,17 +1194,18 @@ static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1141 do_gettimeofday(&(entry->tv)); 1194 do_gettimeofday(&(entry->tv));
1142 do_gettimeofday(&(entry->reply_wait)); /* Used in refreshing func from now on */ 1195 do_gettimeofday(&(entry->reply_wait)); /* Used in refreshing func from now on */
1143 entry->refresh_time = 0; 1196 entry->refresh_time = 0;
1144 ddprintk("entry->shortcut = %p\n", entry->shortcut); 1197 ddprintk_cont("entry->shortcut = %p\n", entry->shortcut);
1145 1198
1146 if(entry->entry_state == INGRESS_RESOLVING && entry->shortcut != NULL){ 1199 if (entry->entry_state == INGRESS_RESOLVING &&
1200 entry->shortcut != NULL) {
1147 entry->entry_state = INGRESS_RESOLVED; 1201 entry->entry_state = INGRESS_RESOLVED;
1148 mpc->in_ops->put(entry); 1202 mpc->in_ops->put(entry);
1149 return; /* Shortcut already open... */ 1203 return; /* Shortcut already open... */
1150 } 1204 }
1151 1205
1152 if (entry->shortcut != NULL) { 1206 if (entry->shortcut != NULL) {
1153 printk("mpoa: (%s) MPOA_res_reply_rcvd: entry->shortcut != NULL, impossible!\n", 1207 pr_info("(%s) entry->shortcut != NULL, impossible!\n",
1154 mpc->dev->name); 1208 mpc->dev->name);
1155 mpc->in_ops->put(entry); 1209 mpc->in_ops->put(entry);
1156 return; 1210 return;
1157 } 1211 }
@@ -1170,14 +1224,14 @@ static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1170 __be32 mask = msg->ip_mask; 1224 __be32 mask = msg->ip_mask;
1171 in_cache_entry *entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask); 1225 in_cache_entry *entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask);
1172 1226
1173 if(entry == NULL){ 1227 if (entry == NULL) {
1174 printk("mpoa: (%s) ingress_purge_rcvd: purge for a non-existing entry, ip = %pI4\n", 1228 pr_info("(%s) purge for a non-existing entry, ip = %pI4\n",
1175 mpc->dev->name, &dst_ip); 1229 mpc->dev->name, &dst_ip);
1176 return; 1230 return;
1177 } 1231 }
1178 1232
1179 do { 1233 do {
1180 dprintk("mpoa: (%s) ingress_purge_rcvd: removing an ingress entry, ip = %pI4\n", 1234 dprintk("(%s) removing an ingress entry, ip = %pI4\n",
1181 mpc->dev->name, &dst_ip); 1235 mpc->dev->name, &dst_ip);
1182 write_lock_bh(&mpc->ingress_lock); 1236 write_lock_bh(&mpc->ingress_lock);
1183 mpc->in_ops->remove_entry(entry, mpc); 1237 mpc->in_ops->remove_entry(entry, mpc);
@@ -1195,7 +1249,8 @@ static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
1195 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(cache_id, mpc); 1249 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(cache_id, mpc);
1196 1250
1197 if (entry == NULL) { 1251 if (entry == NULL) {
1198 dprintk("mpoa: (%s) egress_purge_rcvd: purge for a non-existing entry\n", mpc->dev->name); 1252 dprintk("(%s) purge for a non-existing entry\n",
1253 mpc->dev->name);
1199 return; 1254 return;
1200 } 1255 }
1201 1256
@@ -1214,15 +1269,15 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
1214 struct k_message *purge_msg; 1269 struct k_message *purge_msg;
1215 struct sk_buff *skb; 1270 struct sk_buff *skb;
1216 1271
1217 dprintk("mpoa: purge_egress_shortcut: entering\n"); 1272 dprintk("entering\n");
1218 if (vcc == NULL) { 1273 if (vcc == NULL) {
1219 printk("mpoa: purge_egress_shortcut: vcc == NULL\n"); 1274 pr_info("vcc == NULL\n");
1220 return; 1275 return;
1221 } 1276 }
1222 1277
1223 skb = alloc_skb(sizeof(struct k_message), GFP_ATOMIC); 1278 skb = alloc_skb(sizeof(struct k_message), GFP_ATOMIC);
1224 if (skb == NULL) { 1279 if (skb == NULL) {
1225 printk("mpoa: purge_egress_shortcut: out of memory\n"); 1280 pr_info("out of memory\n");
1226 return; 1281 return;
1227 } 1282 }
1228 1283
@@ -1238,7 +1293,7 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
1238 sk = sk_atm(vcc); 1293 sk = sk_atm(vcc);
1239 skb_queue_tail(&sk->sk_receive_queue, skb); 1294 skb_queue_tail(&sk->sk_receive_queue, skb);
1240 sk->sk_data_ready(sk, skb->len); 1295 sk->sk_data_ready(sk, skb->len);
1241 dprintk("mpoa: purge_egress_shortcut: exiting:\n"); 1296 dprintk("exiting\n");
1242 1297
1243 return; 1298 return;
1244} 1299}
@@ -1247,14 +1302,14 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
1247 * Our MPS died. Tell our daemon to send NHRP data plane purge to each 1302 * Our MPS died. Tell our daemon to send NHRP data plane purge to each
1248 * of the egress shortcuts we have. 1303 * of the egress shortcuts we have.
1249 */ 1304 */
1250static void mps_death( struct k_message * msg, struct mpoa_client * mpc ) 1305static void mps_death(struct k_message *msg, struct mpoa_client *mpc)
1251{ 1306{
1252 eg_cache_entry *entry; 1307 eg_cache_entry *entry;
1253 1308
1254 dprintk("mpoa: (%s) mps_death:\n", mpc->dev->name); 1309 dprintk("(%s)\n", mpc->dev->name);
1255 1310
1256 if(memcmp(msg->MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN)){ 1311 if (memcmp(msg->MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN)) {
1257 printk("mpoa: (%s) mps_death: wrong MPS\n", mpc->dev->name); 1312 pr_info("(%s) wrong MPS\n", mpc->dev->name);
1258 return; 1313 return;
1259 } 1314 }
1260 1315
@@ -1273,20 +1328,21 @@ static void mps_death( struct k_message * msg, struct mpoa_client * mpc )
1273 return; 1328 return;
1274} 1329}
1275 1330
1276static void MPOA_cache_impos_rcvd( struct k_message * msg, struct mpoa_client * mpc) 1331static void MPOA_cache_impos_rcvd(struct k_message *msg,
1332 struct mpoa_client *mpc)
1277{ 1333{
1278 uint16_t holding_time; 1334 uint16_t holding_time;
1279 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(msg->content.eg_info.cache_id, mpc); 1335 eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(msg->content.eg_info.cache_id, mpc);
1280 1336
1281 holding_time = msg->content.eg_info.holding_time; 1337 holding_time = msg->content.eg_info.holding_time;
1282 dprintk("mpoa: (%s) MPOA_cache_impos_rcvd: entry = %p, holding_time = %u\n", 1338 dprintk("(%s) entry = %p, holding_time = %u\n",
1283 mpc->dev->name, entry, holding_time); 1339 mpc->dev->name, entry, holding_time);
1284 if(entry == NULL && holding_time) { 1340 if (entry == NULL && holding_time) {
1285 entry = mpc->eg_ops->add_entry(msg, mpc); 1341 entry = mpc->eg_ops->add_entry(msg, mpc);
1286 mpc->eg_ops->put(entry); 1342 mpc->eg_ops->put(entry);
1287 return; 1343 return;
1288 } 1344 }
1289 if(holding_time){ 1345 if (holding_time) {
1290 mpc->eg_ops->update(entry, holding_time); 1346 mpc->eg_ops->update(entry, holding_time);
1291 return; 1347 return;
1292 } 1348 }
@@ -1300,7 +1356,8 @@ static void MPOA_cache_impos_rcvd( struct k_message * msg, struct mpoa_client *
1300 return; 1356 return;
1301} 1357}
1302 1358
1303static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, struct mpoa_client *mpc) 1359static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg,
1360 struct mpoa_client *mpc)
1304{ 1361{
1305 struct lec_priv *priv; 1362 struct lec_priv *priv;
1306 int i, retval ; 1363 int i, retval ;
@@ -1315,34 +1372,39 @@ static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, struct mpoa_client *m
1315 memcpy(&tlv[7], mesg->MPS_ctrl, ATM_ESA_LEN); /* MPC ctrl ATM addr */ 1372 memcpy(&tlv[7], mesg->MPS_ctrl, ATM_ESA_LEN); /* MPC ctrl ATM addr */
1316 memcpy(mpc->our_ctrl_addr, mesg->MPS_ctrl, ATM_ESA_LEN); 1373 memcpy(mpc->our_ctrl_addr, mesg->MPS_ctrl, ATM_ESA_LEN);
1317 1374
1318 dprintk("mpoa: (%s) setting MPC ctrl ATM address to ", 1375 dprintk("(%s) setting MPC ctrl ATM address to",
1319 (mpc->dev) ? mpc->dev->name : "<unknown>"); 1376 mpc->dev ? mpc->dev->name : "<unknown>");
1320 for (i = 7; i < sizeof(tlv); i++) 1377 for (i = 7; i < sizeof(tlv); i++)
1321 dprintk("%02x ", tlv[i]); 1378 dprintk_cont(" %02x", tlv[i]);
1322 dprintk("\n"); 1379 dprintk_cont("\n");
1323 1380
1324 if (mpc->dev) { 1381 if (mpc->dev) {
1325 priv = netdev_priv(mpc->dev); 1382 priv = netdev_priv(mpc->dev);
1326 retval = priv->lane2_ops->associate_req(mpc->dev, mpc->dev->dev_addr, tlv, sizeof(tlv)); 1383 retval = priv->lane2_ops->associate_req(mpc->dev,
1384 mpc->dev->dev_addr,
1385 tlv, sizeof(tlv));
1327 if (retval == 0) 1386 if (retval == 0)
1328 printk("mpoa: (%s) MPOA device type TLV association failed\n", mpc->dev->name); 1387 pr_info("(%s) MPOA device type TLV association failed\n",
1388 mpc->dev->name);
1329 retval = priv->lane2_ops->resolve(mpc->dev, NULL, 1, NULL, NULL); 1389 retval = priv->lane2_ops->resolve(mpc->dev, NULL, 1, NULL, NULL);
1330 if (retval < 0) 1390 if (retval < 0)
1331 printk("mpoa: (%s) targetless LE_ARP request failed\n", mpc->dev->name); 1391 pr_info("(%s) targetless LE_ARP request failed\n",
1392 mpc->dev->name);
1332 } 1393 }
1333 1394
1334 return; 1395 return;
1335} 1396}
1336 1397
1337static void set_mps_mac_addr_rcvd(struct k_message *msg, struct mpoa_client *client) 1398static void set_mps_mac_addr_rcvd(struct k_message *msg,
1399 struct mpoa_client *client)
1338{ 1400{
1339 1401
1340 if(client->number_of_mps_macs) 1402 if (client->number_of_mps_macs)
1341 kfree(client->mps_macs); 1403 kfree(client->mps_macs);
1342 client->number_of_mps_macs = 0; 1404 client->number_of_mps_macs = 0;
1343 client->mps_macs = kmemdup(msg->MPS_ctrl, ETH_ALEN, GFP_KERNEL); 1405 client->mps_macs = kmemdup(msg->MPS_ctrl, ETH_ALEN, GFP_KERNEL);
1344 if (client->mps_macs == NULL) { 1406 if (client->mps_macs == NULL) {
1345 printk("mpoa: set_mps_mac_addr_rcvd: out of memory\n"); 1407 pr_info("out of memory\n");
1346 return; 1408 return;
1347 } 1409 }
1348 client->number_of_mps_macs = 1; 1410 client->number_of_mps_macs = 1;
@@ -1363,11 +1425,11 @@ static void clean_up(struct k_message *msg, struct mpoa_client *mpc, int action)
1363 /* FIXME: This knows too much of the cache structure */ 1425 /* FIXME: This knows too much of the cache structure */
1364 read_lock_irq(&mpc->egress_lock); 1426 read_lock_irq(&mpc->egress_lock);
1365 entry = mpc->eg_cache; 1427 entry = mpc->eg_cache;
1366 while (entry != NULL){ 1428 while (entry != NULL) {
1367 msg->content.eg_info = entry->ctrl_info; 1429 msg->content.eg_info = entry->ctrl_info;
1368 dprintk("mpoa: cache_id %u\n", entry->ctrl_info.cache_id); 1430 dprintk("cache_id %u\n", entry->ctrl_info.cache_id);
1369 msg_to_mpoad(msg, mpc); 1431 msg_to_mpoad(msg, mpc);
1370 entry = entry->next; 1432 entry = entry->next;
1371 } 1433 }
1372 read_unlock_irq(&mpc->egress_lock); 1434 read_unlock_irq(&mpc->egress_lock);
1373 1435
@@ -1386,20 +1448,22 @@ static void mpc_timer_refresh(void)
1386 return; 1448 return;
1387} 1449}
1388 1450
1389static void mpc_cache_check( unsigned long checking_time ) 1451static void mpc_cache_check(unsigned long checking_time)
1390{ 1452{
1391 struct mpoa_client *mpc = mpcs; 1453 struct mpoa_client *mpc = mpcs;
1392 static unsigned long previous_resolving_check_time; 1454 static unsigned long previous_resolving_check_time;
1393 static unsigned long previous_refresh_time; 1455 static unsigned long previous_refresh_time;
1394 1456
1395 while( mpc != NULL ){ 1457 while (mpc != NULL) {
1396 mpc->in_ops->clear_count(mpc); 1458 mpc->in_ops->clear_count(mpc);
1397 mpc->eg_ops->clear_expired(mpc); 1459 mpc->eg_ops->clear_expired(mpc);
1398 if(checking_time - previous_resolving_check_time > mpc->parameters.mpc_p4 * HZ ){ 1460 if (checking_time - previous_resolving_check_time >
1461 mpc->parameters.mpc_p4 * HZ) {
1399 mpc->in_ops->check_resolving(mpc); 1462 mpc->in_ops->check_resolving(mpc);
1400 previous_resolving_check_time = checking_time; 1463 previous_resolving_check_time = checking_time;
1401 } 1464 }
1402 if(checking_time - previous_refresh_time > mpc->parameters.mpc_p5 * HZ ){ 1465 if (checking_time - previous_refresh_time >
1466 mpc->parameters.mpc_p5 * HZ) {
1403 mpc->in_ops->refresh(mpc); 1467 mpc->in_ops->refresh(mpc);
1404 previous_refresh_time = checking_time; 1468 previous_refresh_time = checking_time;
1405 } 1469 }
@@ -1410,7 +1474,8 @@ static void mpc_cache_check( unsigned long checking_time )
1410 return; 1474 return;
1411} 1475}
1412 1476
1413static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 1477static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd,
1478 unsigned long arg)
1414{ 1479{
1415 int err = 0; 1480 int err = 0;
1416 struct atm_vcc *vcc = ATM_SD(sock); 1481 struct atm_vcc *vcc = ATM_SD(sock);
@@ -1422,21 +1487,20 @@ static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd, unsigned long a
1422 return -EPERM; 1487 return -EPERM;
1423 1488
1424 switch (cmd) { 1489 switch (cmd) {
1425 case ATMMPC_CTRL: 1490 case ATMMPC_CTRL:
1426 err = atm_mpoa_mpoad_attach(vcc, (int)arg); 1491 err = atm_mpoa_mpoad_attach(vcc, (int)arg);
1427 if (err >= 0) 1492 if (err >= 0)
1428 sock->state = SS_CONNECTED; 1493 sock->state = SS_CONNECTED;
1429 break; 1494 break;
1430 case ATMMPC_DATA: 1495 case ATMMPC_DATA:
1431 err = atm_mpoa_vcc_attach(vcc, (void __user *)arg); 1496 err = atm_mpoa_vcc_attach(vcc, (void __user *)arg);
1432 break; 1497 break;
1433 default: 1498 default:
1434 break; 1499 break;
1435 } 1500 }
1436 return err; 1501 return err;
1437} 1502}
1438 1503
1439
1440static struct atm_ioctl atm_ioctl_ops = { 1504static struct atm_ioctl atm_ioctl_ops = {
1441 .owner = THIS_MODULE, 1505 .owner = THIS_MODULE,
1442 .ioctl = atm_mpoa_ioctl, 1506 .ioctl = atm_mpoa_ioctl,
@@ -1447,9 +1511,9 @@ static __init int atm_mpoa_init(void)
1447 register_atm_ioctl(&atm_ioctl_ops); 1511 register_atm_ioctl(&atm_ioctl_ops);
1448 1512
1449 if (mpc_proc_init() != 0) 1513 if (mpc_proc_init() != 0)
1450 printk(KERN_INFO "mpoa: failed to initialize /proc/mpoa\n"); 1514 pr_info("failed to initialize /proc/mpoa\n");
1451 1515
1452 printk("mpc.c: " __DATE__ " " __TIME__ " initialized\n"); 1516 pr_info("mpc.c: " __DATE__ " " __TIME__ " initialized\n");
1453 1517
1454 return 0; 1518 return 0;
1455} 1519}
@@ -1476,15 +1540,15 @@ static void __exit atm_mpoa_cleanup(void)
1476 if (priv->lane2_ops != NULL) 1540 if (priv->lane2_ops != NULL)
1477 priv->lane2_ops->associate_indicator = NULL; 1541 priv->lane2_ops->associate_indicator = NULL;
1478 } 1542 }
1479 ddprintk("mpoa: cleanup_module: about to clear caches\n"); 1543 ddprintk("about to clear caches\n");
1480 mpc->in_ops->destroy_cache(mpc); 1544 mpc->in_ops->destroy_cache(mpc);
1481 mpc->eg_ops->destroy_cache(mpc); 1545 mpc->eg_ops->destroy_cache(mpc);
1482 ddprintk("mpoa: cleanup_module: caches cleared\n"); 1546 ddprintk("caches cleared\n");
1483 kfree(mpc->mps_macs); 1547 kfree(mpc->mps_macs);
1484 memset(mpc, 0, sizeof(struct mpoa_client)); 1548 memset(mpc, 0, sizeof(struct mpoa_client));
1485 ddprintk("mpoa: cleanup_module: about to kfree %p\n", mpc); 1549 ddprintk("about to kfree %p\n", mpc);
1486 kfree(mpc); 1550 kfree(mpc);
1487 ddprintk("mpoa: cleanup_module: next mpc is at %p\n", tmp); 1551 ddprintk("next mpc is at %p\n", tmp);
1488 mpc = tmp; 1552 mpc = tmp;
1489 } 1553 }
1490 1554
@@ -1492,7 +1556,7 @@ static void __exit atm_mpoa_cleanup(void)
1492 qos_head = NULL; 1556 qos_head = NULL;
1493 while (qos != NULL) { 1557 while (qos != NULL) {
1494 nextqos = qos->next; 1558 nextqos = qos->next;
1495 dprintk("mpoa: cleanup_module: freeing qos entry %p\n", qos); 1559 dprintk("freeing qos entry %p\n", qos);
1496 kfree(qos); 1560 kfree(qos);
1497 qos = nextqos; 1561 qos = nextqos;
1498 } 1562 }
diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
index 4504a4b339b..4c141810eb6 100644
--- a/net/atm/mpoa_caches.c
+++ b/net/atm/mpoa_caches.c
@@ -11,15 +11,23 @@
11 */ 11 */
12 12
13#if 0 13#if 0
14#define dprintk printk /* debug */ 14#define dprintk(format, args...) \
15 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
15#else 16#else
16#define dprintk(format,args...) 17#define dprintk(format, args...) \
18 do { if (0) \
19 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
20 } while (0)
17#endif 21#endif
18 22
19#if 0 23#if 0
20#define ddprintk printk /* more debug */ 24#define ddprintk(format, args...) \
25 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
21#else 26#else
22#define ddprintk(format,args...) 27#define ddprintk(format, args...) \
28 do { if (0) \
29 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
30 } while (0)
23#endif 31#endif
24 32
25static in_cache_entry *in_cache_get(__be32 dst_ip, 33static in_cache_entry *in_cache_get(__be32 dst_ip,
@@ -29,8 +37,8 @@ static in_cache_entry *in_cache_get(__be32 dst_ip,
29 37
30 read_lock_bh(&client->ingress_lock); 38 read_lock_bh(&client->ingress_lock);
31 entry = client->in_cache; 39 entry = client->in_cache;
32 while(entry != NULL){ 40 while (entry != NULL) {
33 if( entry->ctrl_info.in_dst_ip == dst_ip ){ 41 if (entry->ctrl_info.in_dst_ip == dst_ip) {
34 atomic_inc(&entry->use); 42 atomic_inc(&entry->use);
35 read_unlock_bh(&client->ingress_lock); 43 read_unlock_bh(&client->ingress_lock);
36 return entry; 44 return entry;
@@ -50,8 +58,8 @@ static in_cache_entry *in_cache_get_with_mask(__be32 dst_ip,
50 58
51 read_lock_bh(&client->ingress_lock); 59 read_lock_bh(&client->ingress_lock);
52 entry = client->in_cache; 60 entry = client->in_cache;
53 while(entry != NULL){ 61 while (entry != NULL) {
54 if((entry->ctrl_info.in_dst_ip & mask) == (dst_ip & mask )){ 62 if ((entry->ctrl_info.in_dst_ip & mask) == (dst_ip & mask)) {
55 atomic_inc(&entry->use); 63 atomic_inc(&entry->use);
56 read_unlock_bh(&client->ingress_lock); 64 read_unlock_bh(&client->ingress_lock);
57 return entry; 65 return entry;
@@ -65,14 +73,14 @@ static in_cache_entry *in_cache_get_with_mask(__be32 dst_ip,
65} 73}
66 74
67static in_cache_entry *in_cache_get_by_vcc(struct atm_vcc *vcc, 75static in_cache_entry *in_cache_get_by_vcc(struct atm_vcc *vcc,
68 struct mpoa_client *client ) 76 struct mpoa_client *client)
69{ 77{
70 in_cache_entry *entry; 78 in_cache_entry *entry;
71 79
72 read_lock_bh(&client->ingress_lock); 80 read_lock_bh(&client->ingress_lock);
73 entry = client->in_cache; 81 entry = client->in_cache;
74 while(entry != NULL){ 82 while (entry != NULL) {
75 if(entry->shortcut == vcc) { 83 if (entry->shortcut == vcc) {
76 atomic_inc(&entry->use); 84 atomic_inc(&entry->use);
77 read_unlock_bh(&client->ingress_lock); 85 read_unlock_bh(&client->ingress_lock);
78 return entry; 86 return entry;
@@ -90,14 +98,14 @@ static in_cache_entry *in_cache_add_entry(__be32 dst_ip,
90 in_cache_entry *entry = kzalloc(sizeof(in_cache_entry), GFP_KERNEL); 98 in_cache_entry *entry = kzalloc(sizeof(in_cache_entry), GFP_KERNEL);
91 99
92 if (entry == NULL) { 100 if (entry == NULL) {
93 printk("mpoa: mpoa_caches.c: new_in_cache_entry: out of memory\n"); 101 pr_info("mpoa: mpoa_caches.c: new_in_cache_entry: out of memory\n");
94 return NULL; 102 return NULL;
95 } 103 }
96 104
97 dprintk("mpoa: mpoa_caches.c: adding an ingress entry, ip = %pI4\n", &dst_ip); 105 dprintk("adding an ingress entry, ip = %pI4\n", &dst_ip);
98 106
99 atomic_set(&entry->use, 1); 107 atomic_set(&entry->use, 1);
100 dprintk("mpoa: mpoa_caches.c: new_in_cache_entry: about to lock\n"); 108 dprintk("new_in_cache_entry: about to lock\n");
101 write_lock_bh(&client->ingress_lock); 109 write_lock_bh(&client->ingress_lock);
102 entry->next = client->in_cache; 110 entry->next = client->in_cache;
103 entry->prev = NULL; 111 entry->prev = NULL;
@@ -115,7 +123,7 @@ static in_cache_entry *in_cache_add_entry(__be32 dst_ip,
115 atomic_inc(&entry->use); 123 atomic_inc(&entry->use);
116 124
117 write_unlock_bh(&client->ingress_lock); 125 write_unlock_bh(&client->ingress_lock);
118 dprintk("mpoa: mpoa_caches.c: new_in_cache_entry: unlocked\n"); 126 dprintk("new_in_cache_entry: unlocked\n");
119 127
120 return entry; 128 return entry;
121} 129}
@@ -126,39 +134,41 @@ static int cache_hit(in_cache_entry *entry, struct mpoa_client *mpc)
126 struct k_message msg; 134 struct k_message msg;
127 135
128 entry->count++; 136 entry->count++;
129 if(entry->entry_state == INGRESS_RESOLVED && entry->shortcut != NULL) 137 if (entry->entry_state == INGRESS_RESOLVED && entry->shortcut != NULL)
130 return OPEN; 138 return OPEN;
131 139
132 if(entry->entry_state == INGRESS_REFRESHING){ 140 if (entry->entry_state == INGRESS_REFRESHING) {
133 if(entry->count > mpc->parameters.mpc_p1){ 141 if (entry->count > mpc->parameters.mpc_p1) {
134 msg.type = SND_MPOA_RES_RQST; 142 msg.type = SND_MPOA_RES_RQST;
135 msg.content.in_info = entry->ctrl_info; 143 msg.content.in_info = entry->ctrl_info;
136 memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN); 144 memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN);
137 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip); 145 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip);
138 if (qos != NULL) msg.qos = qos->qos; 146 if (qos != NULL)
147 msg.qos = qos->qos;
139 msg_to_mpoad(&msg, mpc); 148 msg_to_mpoad(&msg, mpc);
140 do_gettimeofday(&(entry->reply_wait)); 149 do_gettimeofday(&(entry->reply_wait));
141 entry->entry_state = INGRESS_RESOLVING; 150 entry->entry_state = INGRESS_RESOLVING;
142 } 151 }
143 if(entry->shortcut != NULL) 152 if (entry->shortcut != NULL)
144 return OPEN; 153 return OPEN;
145 return CLOSED; 154 return CLOSED;
146 } 155 }
147 156
148 if(entry->entry_state == INGRESS_RESOLVING && entry->shortcut != NULL) 157 if (entry->entry_state == INGRESS_RESOLVING && entry->shortcut != NULL)
149 return OPEN; 158 return OPEN;
150 159
151 if( entry->count > mpc->parameters.mpc_p1 && 160 if (entry->count > mpc->parameters.mpc_p1 &&
152 entry->entry_state == INGRESS_INVALID){ 161 entry->entry_state == INGRESS_INVALID) {
153 dprintk("mpoa: (%s) mpoa_caches.c: threshold exceeded for ip %pI4, sending MPOA res req\n", 162 dprintk("(%s) threshold exceeded for ip %pI4, sending MPOA res req\n",
154 mpc->dev->name, &entry->ctrl_info.in_dst_ip); 163 mpc->dev->name, &entry->ctrl_info.in_dst_ip);
155 entry->entry_state = INGRESS_RESOLVING; 164 entry->entry_state = INGRESS_RESOLVING;
156 msg.type = SND_MPOA_RES_RQST; 165 msg.type = SND_MPOA_RES_RQST;
157 memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN ); 166 memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN);
158 msg.content.in_info = entry->ctrl_info; 167 msg.content.in_info = entry->ctrl_info;
159 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip); 168 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip);
160 if (qos != NULL) msg.qos = qos->qos; 169 if (qos != NULL)
161 msg_to_mpoad( &msg, mpc); 170 msg.qos = qos->qos;
171 msg_to_mpoad(&msg, mpc);
162 do_gettimeofday(&(entry->reply_wait)); 172 do_gettimeofday(&(entry->reply_wait));
163 } 173 }
164 174
@@ -185,7 +195,7 @@ static void in_cache_remove_entry(in_cache_entry *entry,
185 struct k_message msg; 195 struct k_message msg;
186 196
187 vcc = entry->shortcut; 197 vcc = entry->shortcut;
188 dprintk("mpoa: mpoa_caches.c: removing an ingress entry, ip = %pI4\n", 198 dprintk("removing an ingress entry, ip = %pI4\n",
189 &entry->ctrl_info.in_dst_ip); 199 &entry->ctrl_info.in_dst_ip);
190 200
191 if (entry->prev != NULL) 201 if (entry->prev != NULL)
@@ -195,14 +205,15 @@ static void in_cache_remove_entry(in_cache_entry *entry,
195 if (entry->next != NULL) 205 if (entry->next != NULL)
196 entry->next->prev = entry->prev; 206 entry->next->prev = entry->prev;
197 client->in_ops->put(entry); 207 client->in_ops->put(entry);
198 if(client->in_cache == NULL && client->eg_cache == NULL){ 208 if (client->in_cache == NULL && client->eg_cache == NULL) {
199 msg.type = STOP_KEEP_ALIVE_SM; 209 msg.type = STOP_KEEP_ALIVE_SM;
200 msg_to_mpoad(&msg,client); 210 msg_to_mpoad(&msg, client);
201 } 211 }
202 212
203 /* Check if the egress side still uses this VCC */ 213 /* Check if the egress side still uses this VCC */
204 if (vcc != NULL) { 214 if (vcc != NULL) {
205 eg_cache_entry *eg_entry = client->eg_ops->get_by_vcc(vcc, client); 215 eg_cache_entry *eg_entry = client->eg_ops->get_by_vcc(vcc,
216 client);
206 if (eg_entry != NULL) { 217 if (eg_entry != NULL) {
207 client->eg_ops->put(eg_entry); 218 client->eg_ops->put(eg_entry);
208 return; 219 return;
@@ -213,7 +224,6 @@ static void in_cache_remove_entry(in_cache_entry *entry,
213 return; 224 return;
214} 225}
215 226
216
217/* Call this every MPC-p2 seconds... Not exactly correct solution, 227/* Call this every MPC-p2 seconds... Not exactly correct solution,
218 but an easy one... */ 228 but an easy one... */
219static void clear_count_and_expired(struct mpoa_client *client) 229static void clear_count_and_expired(struct mpoa_client *client)
@@ -225,12 +235,12 @@ static void clear_count_and_expired(struct mpoa_client *client)
225 235
226 write_lock_bh(&client->ingress_lock); 236 write_lock_bh(&client->ingress_lock);
227 entry = client->in_cache; 237 entry = client->in_cache;
228 while(entry != NULL){ 238 while (entry != NULL) {
229 entry->count=0; 239 entry->count = 0;
230 next_entry = entry->next; 240 next_entry = entry->next;
231 if((now.tv_sec - entry->tv.tv_sec) 241 if ((now.tv_sec - entry->tv.tv_sec)
232 > entry->ctrl_info.holding_time){ 242 > entry->ctrl_info.holding_time) {
233 dprintk("mpoa: mpoa_caches.c: holding time expired, ip = %pI4\n", 243 dprintk("holding time expired, ip = %pI4\n",
234 &entry->ctrl_info.in_dst_ip); 244 &entry->ctrl_info.in_dst_ip);
235 client->in_ops->remove_entry(entry, client); 245 client->in_ops->remove_entry(entry, client);
236 } 246 }
@@ -250,33 +260,38 @@ static void check_resolving_entries(struct mpoa_client *client)
250 struct timeval now; 260 struct timeval now;
251 struct k_message msg; 261 struct k_message msg;
252 262
253 do_gettimeofday( &now ); 263 do_gettimeofday(&now);
254 264
255 read_lock_bh(&client->ingress_lock); 265 read_lock_bh(&client->ingress_lock);
256 entry = client->in_cache; 266 entry = client->in_cache;
257 while( entry != NULL ){ 267 while (entry != NULL) {
258 if(entry->entry_state == INGRESS_RESOLVING){ 268 if (entry->entry_state == INGRESS_RESOLVING) {
259 if(now.tv_sec - entry->hold_down.tv_sec < client->parameters.mpc_p6){ 269 if ((now.tv_sec - entry->hold_down.tv_sec) <
260 entry = entry->next; /* Entry in hold down */ 270 client->parameters.mpc_p6) {
271 entry = entry->next; /* Entry in hold down */
261 continue; 272 continue;
262 } 273 }
263 if( (now.tv_sec - entry->reply_wait.tv_sec) > 274 if ((now.tv_sec - entry->reply_wait.tv_sec) >
264 entry->retry_time ){ 275 entry->retry_time) {
265 entry->retry_time = MPC_C1*( entry->retry_time ); 276 entry->retry_time = MPC_C1 * (entry->retry_time);
266 if(entry->retry_time > client->parameters.mpc_p5){ 277 /*
267 /* Retry time maximum exceeded, put entry in hold down. */ 278 * Retry time maximum exceeded,
279 * put entry in hold down.
280 */
281 if (entry->retry_time > client->parameters.mpc_p5) {
268 do_gettimeofday(&(entry->hold_down)); 282 do_gettimeofday(&(entry->hold_down));
269 entry->retry_time = client->parameters.mpc_p4; 283 entry->retry_time = client->parameters.mpc_p4;
270 entry = entry->next; 284 entry = entry->next;
271 continue; 285 continue;
272 } 286 }
273 /* Ask daemon to send a resolution request. */ 287 /* Ask daemon to send a resolution request. */
274 memset(&(entry->hold_down),0,sizeof(struct timeval)); 288 memset(&(entry->hold_down), 0, sizeof(struct timeval));
275 msg.type = SND_MPOA_RES_RTRY; 289 msg.type = SND_MPOA_RES_RTRY;
276 memcpy(msg.MPS_ctrl, client->mps_ctrl_addr, ATM_ESA_LEN); 290 memcpy(msg.MPS_ctrl, client->mps_ctrl_addr, ATM_ESA_LEN);
277 msg.content.in_info = entry->ctrl_info; 291 msg.content.in_info = entry->ctrl_info;
278 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip); 292 qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip);
279 if (qos != NULL) msg.qos = qos->qos; 293 if (qos != NULL)
294 msg.qos = qos->qos;
280 msg_to_mpoad(&msg, client); 295 msg_to_mpoad(&msg, client);
281 do_gettimeofday(&(entry->reply_wait)); 296 do_gettimeofday(&(entry->reply_wait));
282 } 297 }
@@ -292,16 +307,17 @@ static void refresh_entries(struct mpoa_client *client)
292 struct timeval now; 307 struct timeval now;
293 struct in_cache_entry *entry = client->in_cache; 308 struct in_cache_entry *entry = client->in_cache;
294 309
295 ddprintk("mpoa: mpoa_caches.c: refresh_entries\n"); 310 ddprintk("refresh_entries\n");
296 do_gettimeofday(&now); 311 do_gettimeofday(&now);
297 312
298 read_lock_bh(&client->ingress_lock); 313 read_lock_bh(&client->ingress_lock);
299 while( entry != NULL ){ 314 while (entry != NULL) {
300 if( entry->entry_state == INGRESS_RESOLVED ){ 315 if (entry->entry_state == INGRESS_RESOLVED) {
301 if(!(entry->refresh_time)) 316 if (!(entry->refresh_time))
302 entry->refresh_time = (2*(entry->ctrl_info.holding_time))/3; 317 entry->refresh_time = (2 * (entry->ctrl_info.holding_time))/3;
303 if( (now.tv_sec - entry->reply_wait.tv_sec) > entry->refresh_time ){ 318 if ((now.tv_sec - entry->reply_wait.tv_sec) >
304 dprintk("mpoa: mpoa_caches.c: refreshing an entry.\n"); 319 entry->refresh_time) {
320 dprintk("refreshing an entry.\n");
305 entry->entry_state = INGRESS_REFRESHING; 321 entry->entry_state = INGRESS_REFRESHING;
306 322
307 } 323 }
@@ -314,21 +330,22 @@ static void refresh_entries(struct mpoa_client *client)
314static void in_destroy_cache(struct mpoa_client *mpc) 330static void in_destroy_cache(struct mpoa_client *mpc)
315{ 331{
316 write_lock_irq(&mpc->ingress_lock); 332 write_lock_irq(&mpc->ingress_lock);
317 while(mpc->in_cache != NULL) 333 while (mpc->in_cache != NULL)
318 mpc->in_ops->remove_entry(mpc->in_cache, mpc); 334 mpc->in_ops->remove_entry(mpc->in_cache, mpc);
319 write_unlock_irq(&mpc->ingress_lock); 335 write_unlock_irq(&mpc->ingress_lock);
320 336
321 return; 337 return;
322} 338}
323 339
324static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id, struct mpoa_client *mpc) 340static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id,
341 struct mpoa_client *mpc)
325{ 342{
326 eg_cache_entry *entry; 343 eg_cache_entry *entry;
327 344
328 read_lock_irq(&mpc->egress_lock); 345 read_lock_irq(&mpc->egress_lock);
329 entry = mpc->eg_cache; 346 entry = mpc->eg_cache;
330 while(entry != NULL){ 347 while (entry != NULL) {
331 if(entry->ctrl_info.cache_id == cache_id){ 348 if (entry->ctrl_info.cache_id == cache_id) {
332 atomic_inc(&entry->use); 349 atomic_inc(&entry->use);
333 read_unlock_irq(&mpc->egress_lock); 350 read_unlock_irq(&mpc->egress_lock);
334 return entry; 351 return entry;
@@ -348,7 +365,7 @@ static eg_cache_entry *eg_cache_get_by_tag(__be32 tag, struct mpoa_client *mpc)
348 365
349 read_lock_irqsave(&mpc->egress_lock, flags); 366 read_lock_irqsave(&mpc->egress_lock, flags);
350 entry = mpc->eg_cache; 367 entry = mpc->eg_cache;
351 while (entry != NULL){ 368 while (entry != NULL) {
352 if (entry->ctrl_info.tag == tag) { 369 if (entry->ctrl_info.tag == tag) {
353 atomic_inc(&entry->use); 370 atomic_inc(&entry->use);
354 read_unlock_irqrestore(&mpc->egress_lock, flags); 371 read_unlock_irqrestore(&mpc->egress_lock, flags);
@@ -362,14 +379,15 @@ static eg_cache_entry *eg_cache_get_by_tag(__be32 tag, struct mpoa_client *mpc)
362} 379}
363 380
364/* This can be called from any context since it saves CPU flags */ 381/* This can be called from any context since it saves CPU flags */
365static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc, struct mpoa_client *mpc) 382static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc,
383 struct mpoa_client *mpc)
366{ 384{
367 unsigned long flags; 385 unsigned long flags;
368 eg_cache_entry *entry; 386 eg_cache_entry *entry;
369 387
370 read_lock_irqsave(&mpc->egress_lock, flags); 388 read_lock_irqsave(&mpc->egress_lock, flags);
371 entry = mpc->eg_cache; 389 entry = mpc->eg_cache;
372 while (entry != NULL){ 390 while (entry != NULL) {
373 if (entry->shortcut == vcc) { 391 if (entry->shortcut == vcc) {
374 atomic_inc(&entry->use); 392 atomic_inc(&entry->use);
375 read_unlock_irqrestore(&mpc->egress_lock, flags); 393 read_unlock_irqrestore(&mpc->egress_lock, flags);
@@ -382,14 +400,15 @@ static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc, struct mpoa_clie
382 return NULL; 400 return NULL;
383} 401}
384 402
385static eg_cache_entry *eg_cache_get_by_src_ip(__be32 ipaddr, struct mpoa_client *mpc) 403static eg_cache_entry *eg_cache_get_by_src_ip(__be32 ipaddr,
404 struct mpoa_client *mpc)
386{ 405{
387 eg_cache_entry *entry; 406 eg_cache_entry *entry;
388 407
389 read_lock_irq(&mpc->egress_lock); 408 read_lock_irq(&mpc->egress_lock);
390 entry = mpc->eg_cache; 409 entry = mpc->eg_cache;
391 while(entry != NULL){ 410 while (entry != NULL) {
392 if(entry->latest_ip_addr == ipaddr) { 411 if (entry->latest_ip_addr == ipaddr) {
393 atomic_inc(&entry->use); 412 atomic_inc(&entry->use);
394 read_unlock_irq(&mpc->egress_lock); 413 read_unlock_irq(&mpc->egress_lock);
395 return entry; 414 return entry;
@@ -421,7 +440,7 @@ static void eg_cache_remove_entry(eg_cache_entry *entry,
421 struct k_message msg; 440 struct k_message msg;
422 441
423 vcc = entry->shortcut; 442 vcc = entry->shortcut;
424 dprintk("mpoa: mpoa_caches.c: removing an egress entry.\n"); 443 dprintk("removing an egress entry.\n");
425 if (entry->prev != NULL) 444 if (entry->prev != NULL)
426 entry->prev->next = entry->next; 445 entry->prev->next = entry->next;
427 else 446 else
@@ -429,9 +448,9 @@ static void eg_cache_remove_entry(eg_cache_entry *entry,
429 if (entry->next != NULL) 448 if (entry->next != NULL)
430 entry->next->prev = entry->prev; 449 entry->next->prev = entry->prev;
431 client->eg_ops->put(entry); 450 client->eg_ops->put(entry);
432 if(client->in_cache == NULL && client->eg_cache == NULL){ 451 if (client->in_cache == NULL && client->eg_cache == NULL) {
433 msg.type = STOP_KEEP_ALIVE_SM; 452 msg.type = STOP_KEEP_ALIVE_SM;
434 msg_to_mpoad(&msg,client); 453 msg_to_mpoad(&msg, client);
435 } 454 }
436 455
437 /* Check if the ingress side still uses this VCC */ 456 /* Check if the ingress side still uses this VCC */
@@ -447,20 +466,21 @@ static void eg_cache_remove_entry(eg_cache_entry *entry,
447 return; 466 return;
448} 467}
449 468
450static eg_cache_entry *eg_cache_add_entry(struct k_message *msg, struct mpoa_client *client) 469static eg_cache_entry *eg_cache_add_entry(struct k_message *msg,
470 struct mpoa_client *client)
451{ 471{
452 eg_cache_entry *entry = kzalloc(sizeof(eg_cache_entry), GFP_KERNEL); 472 eg_cache_entry *entry = kzalloc(sizeof(eg_cache_entry), GFP_KERNEL);
453 473
454 if (entry == NULL) { 474 if (entry == NULL) {
455 printk("mpoa: mpoa_caches.c: new_eg_cache_entry: out of memory\n"); 475 pr_info("out of memory\n");
456 return NULL; 476 return NULL;
457 } 477 }
458 478
459 dprintk("mpoa: mpoa_caches.c: adding an egress entry, ip = %pI4, this should be our IP\n", 479 dprintk("adding an egress entry, ip = %pI4, this should be our IP\n",
460 &msg->content.eg_info.eg_dst_ip); 480 &msg->content.eg_info.eg_dst_ip);
461 481
462 atomic_set(&entry->use, 1); 482 atomic_set(&entry->use, 1);
463 dprintk("mpoa: mpoa_caches.c: new_eg_cache_entry: about to lock\n"); 483 dprintk("new_eg_cache_entry: about to lock\n");
464 write_lock_irq(&client->egress_lock); 484 write_lock_irq(&client->egress_lock);
465 entry->next = client->eg_cache; 485 entry->next = client->eg_cache;
466 entry->prev = NULL; 486 entry->prev = NULL;
@@ -472,18 +492,18 @@ static eg_cache_entry *eg_cache_add_entry(struct k_message *msg, struct mpoa_cli
472 entry->ctrl_info = msg->content.eg_info; 492 entry->ctrl_info = msg->content.eg_info;
473 do_gettimeofday(&(entry->tv)); 493 do_gettimeofday(&(entry->tv));
474 entry->entry_state = EGRESS_RESOLVED; 494 entry->entry_state = EGRESS_RESOLVED;
475 dprintk("mpoa: mpoa_caches.c: new_eg_cache_entry cache_id %lu\n", ntohl(entry->ctrl_info.cache_id)); 495 dprintk("new_eg_cache_entry cache_id %u\n",
476 dprintk("mpoa: mpoa_caches.c: mps_ip = %pI4\n", 496 ntohl(entry->ctrl_info.cache_id));
477 &entry->ctrl_info.mps_ip); 497 dprintk("mps_ip = %pI4\n", &entry->ctrl_info.mps_ip);
478 atomic_inc(&entry->use); 498 atomic_inc(&entry->use);
479 499
480 write_unlock_irq(&client->egress_lock); 500 write_unlock_irq(&client->egress_lock);
481 dprintk("mpoa: mpoa_caches.c: new_eg_cache_entry: unlocked\n"); 501 dprintk("new_eg_cache_entry: unlocked\n");
482 502
483 return entry; 503 return entry;
484} 504}
485 505
486static void update_eg_cache_entry(eg_cache_entry * entry, uint16_t holding_time) 506static void update_eg_cache_entry(eg_cache_entry *entry, uint16_t holding_time)
487{ 507{
488 do_gettimeofday(&(entry->tv)); 508 do_gettimeofday(&(entry->tv));
489 entry->entry_state = EGRESS_RESOLVED; 509 entry->entry_state = EGRESS_RESOLVED;
@@ -502,13 +522,14 @@ static void clear_expired(struct mpoa_client *client)
502 522
503 write_lock_irq(&client->egress_lock); 523 write_lock_irq(&client->egress_lock);
504 entry = client->eg_cache; 524 entry = client->eg_cache;
505 while(entry != NULL){ 525 while (entry != NULL) {
506 next_entry = entry->next; 526 next_entry = entry->next;
507 if((now.tv_sec - entry->tv.tv_sec) 527 if ((now.tv_sec - entry->tv.tv_sec)
508 > entry->ctrl_info.holding_time){ 528 > entry->ctrl_info.holding_time) {
509 msg.type = SND_EGRESS_PURGE; 529 msg.type = SND_EGRESS_PURGE;
510 msg.content.eg_info = entry->ctrl_info; 530 msg.content.eg_info = entry->ctrl_info;
511 dprintk("mpoa: mpoa_caches.c: egress_cache: holding time expired, cache_id = %lu.\n",ntohl(entry->ctrl_info.cache_id)); 531 dprintk("egress_cache: holding time expired, cache_id = %u.\n",
532 ntohl(entry->ctrl_info.cache_id));
512 msg_to_mpoad(&msg, client); 533 msg_to_mpoad(&msg, client);
513 client->eg_ops->remove_entry(entry, client); 534 client->eg_ops->remove_entry(entry, client);
514 } 535 }
@@ -522,7 +543,7 @@ static void clear_expired(struct mpoa_client *client)
522static void eg_destroy_cache(struct mpoa_client *mpc) 543static void eg_destroy_cache(struct mpoa_client *mpc)
523{ 544{
524 write_lock_irq(&mpc->egress_lock); 545 write_lock_irq(&mpc->egress_lock);
525 while(mpc->eg_cache != NULL) 546 while (mpc->eg_cache != NULL)
526 mpc->eg_ops->remove_entry(mpc->eg_cache, mpc); 547 mpc->eg_ops->remove_entry(mpc->eg_cache, mpc);
527 write_unlock_irq(&mpc->egress_lock); 548 write_unlock_irq(&mpc->egress_lock);
528 549
@@ -530,7 +551,6 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
530} 551}
531 552
532 553
533
534static struct in_cache_ops ingress_ops = { 554static struct in_cache_ops ingress_ops = {
535 in_cache_add_entry, /* add_entry */ 555 in_cache_add_entry, /* add_entry */
536 in_cache_get, /* get */ 556 in_cache_get, /* get */
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index 1a0f5ccea9c..b9bdb98427e 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -1,3 +1,4 @@
1#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
1 2
2#ifdef CONFIG_PROC_FS 3#ifdef CONFIG_PROC_FS
3#include <linux/errno.h> 4#include <linux/errno.h>
@@ -8,7 +9,7 @@
8#include <linux/proc_fs.h> 9#include <linux/proc_fs.h>
9#include <linux/time.h> 10#include <linux/time.h>
10#include <linux/seq_file.h> 11#include <linux/seq_file.h>
11#include <asm/uaccess.h> 12#include <linux/uaccess.h>
12#include <linux/atmmpc.h> 13#include <linux/atmmpc.h>
13#include <linux/atm.h> 14#include <linux/atm.h>
14#include "mpc.h" 15#include "mpc.h"
@@ -20,9 +21,23 @@
20 */ 21 */
21 22
22#if 1 23#if 1
23#define dprintk printk /* debug */ 24#define dprintk(format, args...) \
25 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
24#else 26#else
25#define dprintk(format,args...) 27#define dprintk(format, args...) \
28 do { if (0) \
29 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
30 } while (0)
31#endif
32
33#if 0
34#define ddprintk(format, args...) \
35 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
36#else
37#define ddprintk(format, args...) \
38 do { if (0) \
39 printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
40 } while (0)
26#endif 41#endif
27 42
28#define STAT_FILE_NAME "mpc" /* Our statistic file's name */ 43#define STAT_FILE_NAME "mpc" /* Our statistic file's name */
@@ -51,42 +66,37 @@ static const struct file_operations mpc_file_operations = {
51/* 66/*
52 * Returns the state of an ingress cache entry as a string 67 * Returns the state of an ingress cache entry as a string
53 */ 68 */
54static const char *ingress_state_string(int state){ 69static const char *ingress_state_string(int state)
55 switch(state) { 70{
71 switch (state) {
56 case INGRESS_RESOLVING: 72 case INGRESS_RESOLVING:
57 return "resolving "; 73 return "resolving ";
58 break;
59 case INGRESS_RESOLVED: 74 case INGRESS_RESOLVED:
60 return "resolved "; 75 return "resolved ";
61 break;
62 case INGRESS_INVALID: 76 case INGRESS_INVALID:
63 return "invalid "; 77 return "invalid ";
64 break;
65 case INGRESS_REFRESHING: 78 case INGRESS_REFRESHING:
66 return "refreshing "; 79 return "refreshing ";
67 break;
68 default:
69 return "";
70 } 80 }
81
82 return "";
71} 83}
72 84
73/* 85/*
74 * Returns the state of an egress cache entry as a string 86 * Returns the state of an egress cache entry as a string
75 */ 87 */
76static const char *egress_state_string(int state){ 88static const char *egress_state_string(int state)
77 switch(state) { 89{
90 switch (state) {
78 case EGRESS_RESOLVED: 91 case EGRESS_RESOLVED:
79 return "resolved "; 92 return "resolved ";
80 break;
81 case EGRESS_PURGE: 93 case EGRESS_PURGE:
82 return "purge "; 94 return "purge ";
83 break;
84 case EGRESS_INVALID: 95 case EGRESS_INVALID:
85 return "invalid "; 96 return "invalid ";
86 break;
87 default:
88 return "";
89 } 97 }
98
99 return "";
90} 100}
91 101
92/* 102/*
@@ -123,7 +133,6 @@ static void mpc_stop(struct seq_file *m, void *v)
123static int mpc_show(struct seq_file *m, void *v) 133static int mpc_show(struct seq_file *m, void *v)
124{ 134{
125 struct mpoa_client *mpc = v; 135 struct mpoa_client *mpc = v;
126 unsigned char *temp;
127 int i; 136 int i;
128 in_cache_entry *in_entry; 137 in_cache_entry *in_entry;
129 eg_cache_entry *eg_entry; 138 eg_cache_entry *eg_entry;
@@ -140,15 +149,17 @@ static int mpc_show(struct seq_file *m, void *v)
140 do_gettimeofday(&now); 149 do_gettimeofday(&now);
141 150
142 for (in_entry = mpc->in_cache; in_entry; in_entry = in_entry->next) { 151 for (in_entry = mpc->in_cache; in_entry; in_entry = in_entry->next) {
143 temp = (unsigned char *)&in_entry->ctrl_info.in_dst_ip; 152 sprintf(ip_string, "%pI4", &in_entry->ctrl_info.in_dst_ip);
144 sprintf(ip_string,"%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
145 seq_printf(m, "%-16s%s%-14lu%-12u", 153 seq_printf(m, "%-16s%s%-14lu%-12u",
146 ip_string, 154 ip_string,
147 ingress_state_string(in_entry->entry_state), 155 ingress_state_string(in_entry->entry_state),
148 in_entry->ctrl_info.holding_time-(now.tv_sec-in_entry->tv.tv_sec), 156 in_entry->ctrl_info.holding_time -
149 in_entry->packets_fwded); 157 (now.tv_sec-in_entry->tv.tv_sec),
158 in_entry->packets_fwded);
150 if (in_entry->shortcut) 159 if (in_entry->shortcut)
151 seq_printf(m, " %-3d %-3d",in_entry->shortcut->vpi,in_entry->shortcut->vci); 160 seq_printf(m, " %-3d %-3d",
161 in_entry->shortcut->vpi,
162 in_entry->shortcut->vci);
152 seq_printf(m, "\n"); 163 seq_printf(m, "\n");
153 } 164 }
154 165
@@ -156,21 +167,23 @@ static int mpc_show(struct seq_file *m, void *v)
156 seq_printf(m, "Egress Entries:\nIngress MPC ATM addr\nCache-id State Holding time Packets recvd Latest IP addr VPI VCI\n"); 167 seq_printf(m, "Egress Entries:\nIngress MPC ATM addr\nCache-id State Holding time Packets recvd Latest IP addr VPI VCI\n");
157 for (eg_entry = mpc->eg_cache; eg_entry; eg_entry = eg_entry->next) { 168 for (eg_entry = mpc->eg_cache; eg_entry; eg_entry = eg_entry->next) {
158 unsigned char *p = eg_entry->ctrl_info.in_MPC_data_ATM_addr; 169 unsigned char *p = eg_entry->ctrl_info.in_MPC_data_ATM_addr;
159 for(i = 0; i < ATM_ESA_LEN; i++) 170 for (i = 0; i < ATM_ESA_LEN; i++)
160 seq_printf(m, "%02x", p[i]); 171 seq_printf(m, "%02x", p[i]);
161 seq_printf(m, "\n%-16lu%s%-14lu%-15u", 172 seq_printf(m, "\n%-16lu%s%-14lu%-15u",
162 (unsigned long)ntohl(eg_entry->ctrl_info.cache_id), 173 (unsigned long)ntohl(eg_entry->ctrl_info.cache_id),
163 egress_state_string(eg_entry->entry_state), 174 egress_state_string(eg_entry->entry_state),
164 (eg_entry->ctrl_info.holding_time-(now.tv_sec-eg_entry->tv.tv_sec)), 175 (eg_entry->ctrl_info.holding_time -
176 (now.tv_sec-eg_entry->tv.tv_sec)),
165 eg_entry->packets_rcvd); 177 eg_entry->packets_rcvd);
166 178
167 /* latest IP address */ 179 /* latest IP address */
168 temp = (unsigned char *)&eg_entry->latest_ip_addr; 180 sprintf(ip_string, "%pI4", &eg_entry->latest_ip_addr);
169 sprintf(ip_string, "%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
170 seq_printf(m, "%-16s", ip_string); 181 seq_printf(m, "%-16s", ip_string);
171 182
172 if (eg_entry->shortcut) 183 if (eg_entry->shortcut)
173 seq_printf(m, " %-3d %-3d",eg_entry->shortcut->vpi,eg_entry->shortcut->vci); 184 seq_printf(m, " %-3d %-3d",
185 eg_entry->shortcut->vpi,
186 eg_entry->shortcut->vci);
174 seq_printf(m, "\n"); 187 seq_printf(m, "\n");
175 } 188 }
176 seq_printf(m, "\n"); 189 seq_printf(m, "\n");
@@ -258,12 +271,9 @@ static int parse_qos(const char *buff)
258 qos.rxtp.max_pcr = rx_pcr; 271 qos.rxtp.max_pcr = rx_pcr;
259 qos.rxtp.max_sdu = rx_sdu; 272 qos.rxtp.max_sdu = rx_sdu;
260 qos.aal = ATM_AAL5; 273 qos.aal = ATM_AAL5;
261 dprintk("mpoa: mpoa_proc.c: parse_qos(): setting qos paramameters to tx=%d,%d rx=%d,%d\n", 274 dprintk("parse_qos(): setting qos paramameters to tx=%d,%d rx=%d,%d\n",
262 qos.txtp.max_pcr, 275 qos.txtp.max_pcr, qos.txtp.max_sdu,
263 qos.txtp.max_sdu, 276 qos.rxtp.max_pcr, qos.rxtp.max_sdu);
264 qos.rxtp.max_pcr,
265 qos.rxtp.max_sdu
266 );
267 277
268 atm_mpoa_add_qos(ipaddr, &qos); 278 atm_mpoa_add_qos(ipaddr, &qos);
269 return 1; 279 return 1;
@@ -278,7 +288,7 @@ int mpc_proc_init(void)
278 288
279 p = proc_create(STAT_FILE_NAME, 0, atm_proc_root, &mpc_file_operations); 289 p = proc_create(STAT_FILE_NAME, 0, atm_proc_root, &mpc_file_operations);
280 if (!p) { 290 if (!p) {
281 printk(KERN_ERR "Unable to initialize /proc/atm/%s\n", STAT_FILE_NAME); 291 pr_err("Unable to initialize /proc/atm/%s\n", STAT_FILE_NAME);
282 return -ENOMEM; 292 return -ENOMEM;
283 } 293 }
284 return 0; 294 return 0;
@@ -289,10 +299,9 @@ int mpc_proc_init(void)
289 */ 299 */
290void mpc_proc_clean(void) 300void mpc_proc_clean(void)
291{ 301{
292 remove_proc_entry(STAT_FILE_NAME,atm_proc_root); 302 remove_proc_entry(STAT_FILE_NAME, atm_proc_root);
293} 303}
294 304
295
296#endif /* CONFIG_PROC_FS */ 305#endif /* CONFIG_PROC_FS */
297 306
298 307
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index 0af84cd4f65..400839273c6 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -33,6 +33,8 @@
33 * These hooks are not yet available in ppp_generic 33 * These hooks are not yet available in ppp_generic
34 */ 34 */
35 35
36#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
37
36#include <linux/module.h> 38#include <linux/module.h>
37#include <linux/init.h> 39#include <linux/init.h>
38#include <linux/skbuff.h> 40#include <linux/skbuff.h>
@@ -132,7 +134,7 @@ static void pppoatm_unassign_vcc(struct atm_vcc *atmvcc)
132static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb) 134static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
133{ 135{
134 struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc); 136 struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
135 pr_debug("pppoatm push\n"); 137 pr_debug("\n");
136 if (skb == NULL) { /* VCC was closed */ 138 if (skb == NULL) { /* VCC was closed */
137 pr_debug("removing ATMPPP VCC %p\n", pvcc); 139 pr_debug("removing ATMPPP VCC %p\n", pvcc);
138 pppoatm_unassign_vcc(atmvcc); 140 pppoatm_unassign_vcc(atmvcc);
@@ -165,17 +167,17 @@ static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
165 pvcc->chan.mtu += LLC_LEN; 167 pvcc->chan.mtu += LLC_LEN;
166 break; 168 break;
167 } 169 }
168 pr_debug("Couldn't autodetect yet " 170 pr_debug("Couldn't autodetect yet (skb: %02X %02X %02X %02X %02X %02X)\n",
169 "(skb: %02X %02X %02X %02X %02X %02X)\n", 171 skb->data[0], skb->data[1], skb->data[2],
170 skb->data[0], skb->data[1], skb->data[2], 172 skb->data[3], skb->data[4], skb->data[5]);
171 skb->data[3], skb->data[4], skb->data[5]);
172 goto error; 173 goto error;
173 case e_vc: 174 case e_vc:
174 break; 175 break;
175 } 176 }
176 ppp_input(&pvcc->chan, skb); 177 ppp_input(&pvcc->chan, skb);
177 return; 178 return;
178 error: 179
180error:
179 kfree_skb(skb); 181 kfree_skb(skb);
180 ppp_input_error(&pvcc->chan, 0); 182 ppp_input_error(&pvcc->chan, 0);
181} 183}
@@ -194,7 +196,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
194{ 196{
195 struct pppoatm_vcc *pvcc = chan_to_pvcc(chan); 197 struct pppoatm_vcc *pvcc = chan_to_pvcc(chan);
196 ATM_SKB(skb)->vcc = pvcc->atmvcc; 198 ATM_SKB(skb)->vcc = pvcc->atmvcc;
197 pr_debug("pppoatm_send (skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc); 199 pr_debug("(skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc);
198 if (skb->data[0] == '\0' && (pvcc->flags & SC_COMP_PROT)) 200 if (skb->data[0] == '\0' && (pvcc->flags & SC_COMP_PROT))
199 (void) skb_pull(skb, 1); 201 (void) skb_pull(skb, 1);
200 switch (pvcc->encaps) { /* LLC encapsulation needed */ 202 switch (pvcc->encaps) { /* LLC encapsulation needed */
@@ -208,7 +210,8 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
208 goto nospace; 210 goto nospace;
209 } 211 }
210 kfree_skb(skb); 212 kfree_skb(skb);
211 if ((skb = n) == NULL) 213 skb = n;
214 if (skb == NULL)
212 return DROP_PACKET; 215 return DROP_PACKET;
213 } else if (!atm_may_send(pvcc->atmvcc, skb->truesize)) 216 } else if (!atm_may_send(pvcc->atmvcc, skb->truesize))
214 goto nospace; 217 goto nospace;
@@ -226,11 +229,11 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
226 229
227 atomic_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc); 230 atomic_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
228 ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options; 231 ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
229 pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, ATM_SKB(skb)->vcc, 232 pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
230 ATM_SKB(skb)->vcc->dev); 233 skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
231 return ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb) 234 return ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
232 ? DROP_PACKET : 1; 235 ? DROP_PACKET : 1;
233 nospace: 236nospace:
234 /* 237 /*
235 * We don't have space to send this SKB now, but we might have 238 * We don't have space to send this SKB now, but we might have
236 * already applied SC_COMP_PROT compression, so may need to undo 239 * already applied SC_COMP_PROT compression, so may need to undo
@@ -289,7 +292,8 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
289 (be.encaps == e_vc ? 0 : LLC_LEN); 292 (be.encaps == e_vc ? 0 : LLC_LEN);
290 pvcc->wakeup_tasklet = tasklet_proto; 293 pvcc->wakeup_tasklet = tasklet_proto;
291 pvcc->wakeup_tasklet.data = (unsigned long) &pvcc->chan; 294 pvcc->wakeup_tasklet.data = (unsigned long) &pvcc->chan;
292 if ((err = ppp_register_channel(&pvcc->chan)) != 0) { 295 err = ppp_register_channel(&pvcc->chan);
296 if (err != 0) {
293 kfree(pvcc); 297 kfree(pvcc);
294 return err; 298 return err;
295 } 299 }
diff --git a/net/atm/proc.c b/net/atm/proc.c
index ab8419a324b..7a96b2376bd 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -24,15 +24,15 @@
24#include <linux/init.h> /* for __init */ 24#include <linux/init.h> /* for __init */
25#include <net/net_namespace.h> 25#include <net/net_namespace.h>
26#include <net/atmclip.h> 26#include <net/atmclip.h>
27#include <asm/uaccess.h> 27#include <linux/uaccess.h>
28#include <linux/param.h> /* for HZ */
28#include <asm/atomic.h> 29#include <asm/atomic.h>
29#include <asm/param.h> /* for HZ */
30#include "resources.h" 30#include "resources.h"
31#include "common.h" /* atm_proc_init prototype */ 31#include "common.h" /* atm_proc_init prototype */
32#include "signaling.h" /* to get sigd - ugly too */ 32#include "signaling.h" /* to get sigd - ugly too */
33 33
34static ssize_t proc_dev_atm_read(struct file *file,char __user *buf,size_t count, 34static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
35 loff_t *pos); 35 size_t count, loff_t *pos);
36 36
37static const struct file_operations proc_atm_dev_ops = { 37static const struct file_operations proc_atm_dev_ops = {
38 .owner = THIS_MODULE, 38 .owner = THIS_MODULE,
@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
43 const struct k_atm_aal_stats *stats) 43 const struct k_atm_aal_stats *stats)
44{ 44{
45 seq_printf(seq, "%s ( %d %d %d %d %d )", aal, 45 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
46 atomic_read(&stats->tx),atomic_read(&stats->tx_err), 46 atomic_read(&stats->tx), atomic_read(&stats->tx_err),
47 atomic_read(&stats->rx),atomic_read(&stats->rx_err), 47 atomic_read(&stats->rx), atomic_read(&stats->rx_err),
48 atomic_read(&stats->rx_drop)); 48 atomic_read(&stats->rx_drop));
49} 49}
50 50
51static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev) 51static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
@@ -151,8 +151,8 @@ static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
151 151
152static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc) 152static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc)
153{ 153{
154 static const char *const class_name[] = 154 static const char *const class_name[] = {
155 {"off","UBR","CBR","VBR","ABR"}; 155 "off", "UBR", "CBR", "VBR", "ABR"};
156 static const char *const aal_name[] = { 156 static const char *const aal_name[] = {
157 "---", "1", "2", "3/4", /* 0- 3 */ 157 "---", "1", "2", "3/4", /* 0- 3 */
158 "???", "5", "???", "???", /* 4- 7 */ 158 "???", "5", "???", "???", /* 4- 7 */
@@ -160,11 +160,12 @@ static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc)
160 "???", "0", "???", "???"}; /* 12-15 */ 160 "???", "0", "???", "???"}; /* 12-15 */
161 161
162 seq_printf(seq, "%3d %3d %5d %-3s %7d %-5s %7d %-6s", 162 seq_printf(seq, "%3d %3d %5d %-3s %7d %-5s %7d %-6s",
163 vcc->dev->number,vcc->vpi,vcc->vci, 163 vcc->dev->number, vcc->vpi, vcc->vci,
164 vcc->qos.aal >= ARRAY_SIZE(aal_name) ? "err" : 164 vcc->qos.aal >= ARRAY_SIZE(aal_name) ? "err" :
165 aal_name[vcc->qos.aal],vcc->qos.rxtp.min_pcr, 165 aal_name[vcc->qos.aal], vcc->qos.rxtp.min_pcr,
166 class_name[vcc->qos.rxtp.traffic_class],vcc->qos.txtp.min_pcr, 166 class_name[vcc->qos.rxtp.traffic_class],
167 class_name[vcc->qos.txtp.traffic_class]); 167 vcc->qos.txtp.min_pcr,
168 class_name[vcc->qos.txtp.traffic_class]);
168 if (test_bit(ATM_VF_IS_CLIP, &vcc->flags)) { 169 if (test_bit(ATM_VF_IS_CLIP, &vcc->flags)) {
169 struct clip_vcc *clip_vcc = CLIP_VCC(vcc); 170 struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
170 struct net_device *dev; 171 struct net_device *dev;
@@ -195,19 +196,20 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
195 seq_printf(seq, "%3d %3d %5d ", vcc->dev->number, vcc->vpi, 196 seq_printf(seq, "%3d %3d %5d ", vcc->dev->number, vcc->vpi,
196 vcc->vci); 197 vcc->vci);
197 switch (sk->sk_family) { 198 switch (sk->sk_family) {
198 case AF_ATMPVC: 199 case AF_ATMPVC:
199 seq_printf(seq, "PVC"); 200 seq_printf(seq, "PVC");
200 break; 201 break;
201 case AF_ATMSVC: 202 case AF_ATMSVC:
202 seq_printf(seq, "SVC"); 203 seq_printf(seq, "SVC");
203 break; 204 break;
204 default: 205 default:
205 seq_printf(seq, "%3d", sk->sk_family); 206 seq_printf(seq, "%3d", sk->sk_family);
206 } 207 }
207 seq_printf(seq, " %04lx %5d %7d/%7d %7d/%7d [%d]\n", vcc->flags, sk->sk_err, 208 seq_printf(seq, " %04lx %5d %7d/%7d %7d/%7d [%d]\n",
208 sk_wmem_alloc_get(sk), sk->sk_sndbuf, 209 vcc->flags, sk->sk_err,
209 sk_rmem_alloc_get(sk), sk->sk_rcvbuf, 210 sk_wmem_alloc_get(sk), sk->sk_sndbuf,
210 atomic_read(&sk->sk_refcnt)); 211 sk_rmem_alloc_get(sk), sk->sk_rcvbuf,
212 atomic_read(&sk->sk_refcnt));
211} 213}
212 214
213static void svc_info(struct seq_file *seq, struct atm_vcc *vcc) 215static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
@@ -236,7 +238,7 @@ static int atm_dev_seq_show(struct seq_file *seq, void *v)
236 "Itf Type ESI/\"MAC\"addr " 238 "Itf Type ESI/\"MAC\"addr "
237 "AAL(TX,err,RX,err,drop) ... [refcnt]\n"; 239 "AAL(TX,err,RX,err,drop) ... [refcnt]\n";
238 240
239 if (v == SEQ_START_TOKEN) 241 if (v == &atm_devs)
240 seq_puts(seq, atm_dev_banner); 242 seq_puts(seq, atm_dev_banner);
241 else { 243 else {
242 struct atm_dev *dev = list_entry(v, struct atm_dev, dev_list); 244 struct atm_dev *dev = list_entry(v, struct atm_dev, dev_list);
@@ -376,32 +378,35 @@ static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
376 unsigned long page; 378 unsigned long page;
377 int length; 379 int length;
378 380
379 if (count == 0) return 0; 381 if (count == 0)
382 return 0;
380 page = get_zeroed_page(GFP_KERNEL); 383 page = get_zeroed_page(GFP_KERNEL);
381 if (!page) return -ENOMEM; 384 if (!page)
385 return -ENOMEM;
382 dev = PDE(file->f_path.dentry->d_inode)->data; 386 dev = PDE(file->f_path.dentry->d_inode)->data;
383 if (!dev->ops->proc_read) 387 if (!dev->ops->proc_read)
384 length = -EINVAL; 388 length = -EINVAL;
385 else { 389 else {
386 length = dev->ops->proc_read(dev,pos,(char *) page); 390 length = dev->ops->proc_read(dev, pos, (char *)page);
387 if (length > count) length = -EINVAL; 391 if (length > count)
392 length = -EINVAL;
388 } 393 }
389 if (length >= 0) { 394 if (length >= 0) {
390 if (copy_to_user(buf,(char *) page,length)) length = -EFAULT; 395 if (copy_to_user(buf, (char *)page, length))
396 length = -EFAULT;
391 (*pos)++; 397 (*pos)++;
392 } 398 }
393 free_page(page); 399 free_page(page);
394 return length; 400 return length;
395} 401}
396 402
397
398struct proc_dir_entry *atm_proc_root; 403struct proc_dir_entry *atm_proc_root;
399EXPORT_SYMBOL(atm_proc_root); 404EXPORT_SYMBOL(atm_proc_root);
400 405
401 406
402int atm_proc_dev_register(struct atm_dev *dev) 407int atm_proc_dev_register(struct atm_dev *dev)
403{ 408{
404 int digits,num; 409 int digits, num;
405 int error; 410 int error;
406 411
407 /* No proc info */ 412 /* No proc info */
@@ -410,26 +415,28 @@ int atm_proc_dev_register(struct atm_dev *dev)
410 415
411 error = -ENOMEM; 416 error = -ENOMEM;
412 digits = 0; 417 digits = 0;
413 for (num = dev->number; num; num /= 10) digits++; 418 for (num = dev->number; num; num /= 10)
414 if (!digits) digits++; 419 digits++;
420 if (!digits)
421 digits++;
415 422
416 dev->proc_name = kmalloc(strlen(dev->type) + digits + 2, GFP_KERNEL); 423 dev->proc_name = kmalloc(strlen(dev->type) + digits + 2, GFP_KERNEL);
417 if (!dev->proc_name) 424 if (!dev->proc_name)
418 goto err_out; 425 goto err_out;
419 sprintf(dev->proc_name,"%s:%d",dev->type, dev->number); 426 sprintf(dev->proc_name, "%s:%d", dev->type, dev->number);
420 427
421 dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root, 428 dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root,
422 &proc_atm_dev_ops, dev); 429 &proc_atm_dev_ops, dev);
423 if (!dev->proc_entry) 430 if (!dev->proc_entry)
424 goto err_free_name; 431 goto err_free_name;
425 return 0; 432 return 0;
433
426err_free_name: 434err_free_name:
427 kfree(dev->proc_name); 435 kfree(dev->proc_name);
428err_out: 436err_out:
429 return error; 437 return error;
430} 438}
431 439
432
433void atm_proc_dev_deregister(struct atm_dev *dev) 440void atm_proc_dev_deregister(struct atm_dev *dev)
434{ 441{
435 if (!dev->ops->proc_read) 442 if (!dev->ops->proc_read)
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index 8d74e62b0d7..437ee70c5e6 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -17,32 +17,35 @@
17#include "common.h" /* common for PVCs and SVCs */ 17#include "common.h" /* common for PVCs and SVCs */
18 18
19 19
20static int pvc_shutdown(struct socket *sock,int how) 20static int pvc_shutdown(struct socket *sock, int how)
21{ 21{
22 return 0; 22 return 0;
23} 23}
24 24
25 25static int pvc_bind(struct socket *sock, struct sockaddr *sockaddr,
26static int pvc_bind(struct socket *sock,struct sockaddr *sockaddr, 26 int sockaddr_len)
27 int sockaddr_len)
28{ 27{
29 struct sock *sk = sock->sk; 28 struct sock *sk = sock->sk;
30 struct sockaddr_atmpvc *addr; 29 struct sockaddr_atmpvc *addr;
31 struct atm_vcc *vcc; 30 struct atm_vcc *vcc;
32 int error; 31 int error;
33 32
34 if (sockaddr_len != sizeof(struct sockaddr_atmpvc)) return -EINVAL; 33 if (sockaddr_len != sizeof(struct sockaddr_atmpvc))
35 addr = (struct sockaddr_atmpvc *) sockaddr; 34 return -EINVAL;
36 if (addr->sap_family != AF_ATMPVC) return -EAFNOSUPPORT; 35 addr = (struct sockaddr_atmpvc *)sockaddr;
36 if (addr->sap_family != AF_ATMPVC)
37 return -EAFNOSUPPORT;
37 lock_sock(sk); 38 lock_sock(sk);
38 vcc = ATM_SD(sock); 39 vcc = ATM_SD(sock);
39 if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) { 40 if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) {
40 error = -EBADFD; 41 error = -EBADFD;
41 goto out; 42 goto out;
42 } 43 }
43 if (test_bit(ATM_VF_PARTIAL,&vcc->flags)) { 44 if (test_bit(ATM_VF_PARTIAL, &vcc->flags)) {
44 if (vcc->vpi != ATM_VPI_UNSPEC) addr->sap_addr.vpi = vcc->vpi; 45 if (vcc->vpi != ATM_VPI_UNSPEC)
45 if (vcc->vci != ATM_VCI_UNSPEC) addr->sap_addr.vci = vcc->vci; 46 addr->sap_addr.vpi = vcc->vpi;
47 if (vcc->vci != ATM_VCI_UNSPEC)
48 addr->sap_addr.vci = vcc->vci;
46 } 49 }
47 error = vcc_connect(sock, addr->sap_addr.itf, addr->sap_addr.vpi, 50 error = vcc_connect(sock, addr->sap_addr.itf, addr->sap_addr.vpi,
48 addr->sap_addr.vci); 51 addr->sap_addr.vci);
@@ -51,11 +54,10 @@ out:
51 return error; 54 return error;
52} 55}
53 56
54 57static int pvc_connect(struct socket *sock, struct sockaddr *sockaddr,
55static int pvc_connect(struct socket *sock,struct sockaddr *sockaddr, 58 int sockaddr_len, int flags)
56 int sockaddr_len,int flags)
57{ 59{
58 return pvc_bind(sock,sockaddr,sockaddr_len); 60 return pvc_bind(sock, sockaddr, sockaddr_len);
59} 61}
60 62
61static int pvc_setsockopt(struct socket *sock, int level, int optname, 63static int pvc_setsockopt(struct socket *sock, int level, int optname,
@@ -70,7 +72,6 @@ static int pvc_setsockopt(struct socket *sock, int level, int optname,
70 return error; 72 return error;
71} 73}
72 74
73
74static int pvc_getsockopt(struct socket *sock, int level, int optname, 75static int pvc_getsockopt(struct socket *sock, int level, int optname,
75 char __user *optval, int __user *optlen) 76 char __user *optval, int __user *optlen)
76{ 77{
@@ -83,16 +84,16 @@ static int pvc_getsockopt(struct socket *sock, int level, int optname,
83 return error; 84 return error;
84} 85}
85 86
86 87static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr,
87static int pvc_getname(struct socket *sock,struct sockaddr *sockaddr, 88 int *sockaddr_len, int peer)
88 int *sockaddr_len,int peer)
89{ 89{
90 struct sockaddr_atmpvc *addr; 90 struct sockaddr_atmpvc *addr;
91 struct atm_vcc *vcc = ATM_SD(sock); 91 struct atm_vcc *vcc = ATM_SD(sock);
92 92
93 if (!vcc->dev || !test_bit(ATM_VF_ADDR,&vcc->flags)) return -ENOTCONN; 93 if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
94 return -ENOTCONN;
94 *sockaddr_len = sizeof(struct sockaddr_atmpvc); 95 *sockaddr_len = sizeof(struct sockaddr_atmpvc);
95 addr = (struct sockaddr_atmpvc *) sockaddr; 96 addr = (struct sockaddr_atmpvc *)sockaddr;
96 addr->sap_family = AF_ATMPVC; 97 addr->sap_family = AF_ATMPVC;
97 addr->sap_addr.itf = vcc->dev->number; 98 addr->sap_addr.itf = vcc->dev->number;
98 addr->sap_addr.vpi = vcc->vpi; 99 addr->sap_addr.vpi = vcc->vpi;
@@ -100,7 +101,6 @@ static int pvc_getname(struct socket *sock,struct sockaddr *sockaddr,
100 return 0; 101 return 0;
101} 102}
102 103
103
104static const struct proto_ops pvc_proto_ops = { 104static const struct proto_ops pvc_proto_ops = {
105 .family = PF_ATMPVC, 105 .family = PF_ATMPVC,
106 .owner = THIS_MODULE, 106 .owner = THIS_MODULE,
@@ -137,7 +137,6 @@ static int pvc_create(struct net *net, struct socket *sock, int protocol,
137 return vcc_create(net, sock, protocol, PF_ATMPVC); 137 return vcc_create(net, sock, protocol, PF_ATMPVC);
138} 138}
139 139
140
141static const struct net_proto_family pvc_family_ops = { 140static const struct net_proto_family pvc_family_ops = {
142 .family = PF_ATMPVC, 141 .family = PF_ATMPVC,
143 .create = pvc_create, 142 .create = pvc_create,
diff --git a/net/atm/raw.c b/net/atm/raw.c
index cbfcc71a17b..d0c4bd047dc 100644
--- a/net/atm/raw.c
+++ b/net/atm/raw.c
@@ -2,6 +2,7 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
5 6
6#include <linux/module.h> 7#include <linux/module.h>
7#include <linux/atmdev.h> 8#include <linux/atmdev.h>
@@ -17,7 +18,7 @@
17 * SKB == NULL indicates that the link is being closed 18 * SKB == NULL indicates that the link is being closed
18 */ 19 */
19 20
20static void atm_push_raw(struct atm_vcc *vcc,struct sk_buff *skb) 21static void atm_push_raw(struct atm_vcc *vcc, struct sk_buff *skb)
21{ 22{
22 if (skb) { 23 if (skb) {
23 struct sock *sk = sk_atm(vcc); 24 struct sock *sk = sk_atm(vcc);
@@ -27,36 +28,33 @@ static void atm_push_raw(struct atm_vcc *vcc,struct sk_buff *skb)
27 } 28 }
28} 29}
29 30
30 31static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb)
31static void atm_pop_raw(struct atm_vcc *vcc,struct sk_buff *skb)
32{ 32{
33 struct sock *sk = sk_atm(vcc); 33 struct sock *sk = sk_atm(vcc);
34 34
35 pr_debug("APopR (%d) %d -= %d\n", vcc->vci, 35 pr_debug("(%d) %d -= %d\n",
36 sk_wmem_alloc_get(sk), skb->truesize); 36 vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
37 atomic_sub(skb->truesize, &sk->sk_wmem_alloc); 37 atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
38 dev_kfree_skb_any(skb); 38 dev_kfree_skb_any(skb);
39 sk->sk_write_space(sk); 39 sk->sk_write_space(sk);
40} 40}
41 41
42 42static int atm_send_aal0(struct atm_vcc *vcc, struct sk_buff *skb)
43static int atm_send_aal0(struct atm_vcc *vcc,struct sk_buff *skb)
44{ 43{
45 /* 44 /*
46 * Note that if vpi/vci are _ANY or _UNSPEC the below will 45 * Note that if vpi/vci are _ANY or _UNSPEC the below will
47 * still work 46 * still work
48 */ 47 */
49 if (!capable(CAP_NET_ADMIN) && 48 if (!capable(CAP_NET_ADMIN) &&
50 (((u32 *) skb->data)[0] & (ATM_HDR_VPI_MASK | ATM_HDR_VCI_MASK)) != 49 (((u32 *)skb->data)[0] & (ATM_HDR_VPI_MASK | ATM_HDR_VCI_MASK)) !=
51 ((vcc->vpi << ATM_HDR_VPI_SHIFT) | (vcc->vci << ATM_HDR_VCI_SHIFT))) 50 ((vcc->vpi << ATM_HDR_VPI_SHIFT) |
52 { 51 (vcc->vci << ATM_HDR_VCI_SHIFT))) {
53 kfree_skb(skb); 52 kfree_skb(skb);
54 return -EADDRNOTAVAIL; 53 return -EADDRNOTAVAIL;
55 } 54 }
56 return vcc->dev->ops->send(vcc,skb); 55 return vcc->dev->ops->send(vcc, skb);
57} 56}
58 57
59
60int atm_init_aal0(struct atm_vcc *vcc) 58int atm_init_aal0(struct atm_vcc *vcc)
61{ 59{
62 vcc->push = atm_push_raw; 60 vcc->push = atm_push_raw;
@@ -66,7 +64,6 @@ int atm_init_aal0(struct atm_vcc *vcc)
66 return 0; 64 return 0;
67} 65}
68 66
69
70int atm_init_aal34(struct atm_vcc *vcc) 67int atm_init_aal34(struct atm_vcc *vcc)
71{ 68{
72 vcc->push = atm_push_raw; 69 vcc->push = atm_push_raw;
@@ -76,7 +73,6 @@ int atm_init_aal34(struct atm_vcc *vcc)
76 return 0; 73 return 0;
77} 74}
78 75
79
80int atm_init_aal5(struct atm_vcc *vcc) 76int atm_init_aal5(struct atm_vcc *vcc)
81{ 77{
82 vcc->push = atm_push_raw; 78 vcc->push = atm_push_raw;
@@ -85,6 +81,4 @@ int atm_init_aal5(struct atm_vcc *vcc)
85 vcc->send = vcc->dev->ops->send; 81 vcc->send = vcc->dev->ops->send;
86 return 0; 82 return 0;
87} 83}
88
89
90EXPORT_SYMBOL(atm_init_aal5); 84EXPORT_SYMBOL(atm_init_aal5);
diff --git a/net/atm/resources.c b/net/atm/resources.c
index 56b7322ff46..90082904f20 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -7,6 +7,7 @@
7 * 2002/01 - don't free the whole struct sock on sk->destruct time, 7 * 2002/01 - don't free the whole struct sock on sk->destruct time,
8 * use the default destruct function initialized by sock_init_data */ 8 * use the default destruct function initialized by sock_init_data */
9 9
10#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
10 11
11#include <linux/ctype.h> 12#include <linux/ctype.h>
12#include <linux/string.h> 13#include <linux/string.h>
@@ -70,7 +71,7 @@ struct atm_dev *atm_dev_lookup(int number)
70 mutex_unlock(&atm_dev_mutex); 71 mutex_unlock(&atm_dev_mutex);
71 return dev; 72 return dev;
72} 73}
73 74EXPORT_SYMBOL(atm_dev_lookup);
74 75
75struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops, 76struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops,
76 int number, unsigned long *flags) 77 int number, unsigned long *flags)
@@ -79,13 +80,13 @@ struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops,
79 80
80 dev = __alloc_atm_dev(type); 81 dev = __alloc_atm_dev(type);
81 if (!dev) { 82 if (!dev) {
82 printk(KERN_ERR "atm_dev_register: no space for dev %s\n", 83 pr_err("no space for dev %s\n", type);
83 type);
84 return NULL; 84 return NULL;
85 } 85 }
86 mutex_lock(&atm_dev_mutex); 86 mutex_lock(&atm_dev_mutex);
87 if (number != -1) { 87 if (number != -1) {
88 if ((inuse = __atm_dev_lookup(number))) { 88 inuse = __atm_dev_lookup(number);
89 if (inuse) {
89 atm_dev_put(inuse); 90 atm_dev_put(inuse);
90 mutex_unlock(&atm_dev_mutex); 91 mutex_unlock(&atm_dev_mutex);
91 kfree(dev); 92 kfree(dev);
@@ -109,16 +110,12 @@ struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops,
109 atomic_set(&dev->refcnt, 1); 110 atomic_set(&dev->refcnt, 1);
110 111
111 if (atm_proc_dev_register(dev) < 0) { 112 if (atm_proc_dev_register(dev) < 0) {
112 printk(KERN_ERR "atm_dev_register: " 113 pr_err("atm_proc_dev_register failed for dev %s\n", type);
113 "atm_proc_dev_register failed for dev %s\n",
114 type);
115 goto out_fail; 114 goto out_fail;
116 } 115 }
117 116
118 if (atm_register_sysfs(dev) < 0) { 117 if (atm_register_sysfs(dev) < 0) {
119 printk(KERN_ERR "atm_dev_register: " 118 pr_err("atm_register_sysfs failed for dev %s\n", type);
120 "atm_register_sysfs failed for dev %s\n",
121 type);
122 atm_proc_dev_deregister(dev); 119 atm_proc_dev_deregister(dev);
123 goto out_fail; 120 goto out_fail;
124 } 121 }
@@ -134,7 +131,7 @@ out_fail:
134 dev = NULL; 131 dev = NULL;
135 goto out; 132 goto out;
136} 133}
137 134EXPORT_SYMBOL(atm_dev_register);
138 135
139void atm_dev_deregister(struct atm_dev *dev) 136void atm_dev_deregister(struct atm_dev *dev)
140{ 137{
@@ -156,7 +153,7 @@ void atm_dev_deregister(struct atm_dev *dev)
156 153
157 atm_dev_put(dev); 154 atm_dev_put(dev);
158} 155}
159 156EXPORT_SYMBOL(atm_dev_deregister);
160 157
161static void copy_aal_stats(struct k_atm_aal_stats *from, 158static void copy_aal_stats(struct k_atm_aal_stats *from,
162 struct atm_aal_stats *to) 159 struct atm_aal_stats *to)
@@ -166,7 +163,6 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
166#undef __HANDLE_ITEM 163#undef __HANDLE_ITEM
167} 164}
168 165
169
170static void subtract_aal_stats(struct k_atm_aal_stats *from, 166static void subtract_aal_stats(struct k_atm_aal_stats *from,
171 struct atm_aal_stats *to) 167 struct atm_aal_stats *to)
172{ 168{
@@ -175,8 +171,8 @@ static void subtract_aal_stats(struct k_atm_aal_stats *from,
175#undef __HANDLE_ITEM 171#undef __HANDLE_ITEM
176} 172}
177 173
178 174static int fetch_stats(struct atm_dev *dev, struct atm_dev_stats __user *arg,
179static int fetch_stats(struct atm_dev *dev, struct atm_dev_stats __user *arg, int zero) 175 int zero)
180{ 176{
181 struct atm_dev_stats tmp; 177 struct atm_dev_stats tmp;
182 int error = 0; 178 int error = 0;
@@ -194,7 +190,6 @@ static int fetch_stats(struct atm_dev *dev, struct atm_dev_stats __user *arg, in
194 return error ? -EFAULT : 0; 190 return error ? -EFAULT : 0;
195} 191}
196 192
197
198int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat) 193int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
199{ 194{
200 void __user *buf; 195 void __user *buf;
@@ -210,50 +205,49 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
210#endif 205#endif
211 206
212 switch (cmd) { 207 switch (cmd) {
213 case ATM_GETNAMES: 208 case ATM_GETNAMES:
214 209 if (compat) {
215 if (compat) {
216#ifdef CONFIG_COMPAT 210#ifdef CONFIG_COMPAT
217 struct compat_atm_iobuf __user *ciobuf = arg; 211 struct compat_atm_iobuf __user *ciobuf = arg;
218 compat_uptr_t cbuf; 212 compat_uptr_t cbuf;
219 iobuf_len = &ciobuf->length; 213 iobuf_len = &ciobuf->length;
220 if (get_user(cbuf, &ciobuf->buffer)) 214 if (get_user(cbuf, &ciobuf->buffer))
221 return -EFAULT; 215 return -EFAULT;
222 buf = compat_ptr(cbuf); 216 buf = compat_ptr(cbuf);
223#endif 217#endif
224 } else { 218 } else {
225 struct atm_iobuf __user *iobuf = arg; 219 struct atm_iobuf __user *iobuf = arg;
226 iobuf_len = &iobuf->length; 220 iobuf_len = &iobuf->length;
227 if (get_user(buf, &iobuf->buffer)) 221 if (get_user(buf, &iobuf->buffer))
228 return -EFAULT;
229 }
230 if (get_user(len, iobuf_len))
231 return -EFAULT; 222 return -EFAULT;
232 mutex_lock(&atm_dev_mutex); 223 }
233 list_for_each(p, &atm_devs) 224 if (get_user(len, iobuf_len))
234 size += sizeof(int); 225 return -EFAULT;
235 if (size > len) { 226 mutex_lock(&atm_dev_mutex);
236 mutex_unlock(&atm_dev_mutex); 227 list_for_each(p, &atm_devs)
237 return -E2BIG; 228 size += sizeof(int);
238 } 229 if (size > len) {
239 tmp_buf = kmalloc(size, GFP_ATOMIC); 230 mutex_unlock(&atm_dev_mutex);
240 if (!tmp_buf) { 231 return -E2BIG;
241 mutex_unlock(&atm_dev_mutex); 232 }
242 return -ENOMEM; 233 tmp_buf = kmalloc(size, GFP_ATOMIC);
243 } 234 if (!tmp_buf) {
244 tmp_p = tmp_buf;
245 list_for_each(p, &atm_devs) {
246 dev = list_entry(p, struct atm_dev, dev_list);
247 *tmp_p++ = dev->number;
248 }
249 mutex_unlock(&atm_dev_mutex); 235 mutex_unlock(&atm_dev_mutex);
250 error = ((copy_to_user(buf, tmp_buf, size)) || 236 return -ENOMEM;
251 put_user(size, iobuf_len)) 237 }
252 ? -EFAULT : 0; 238 tmp_p = tmp_buf;
253 kfree(tmp_buf); 239 list_for_each(p, &atm_devs) {
254 return error; 240 dev = list_entry(p, struct atm_dev, dev_list);
255 default: 241 *tmp_p++ = dev->number;
256 break; 242 }
243 mutex_unlock(&atm_dev_mutex);
244 error = ((copy_to_user(buf, tmp_buf, size)) ||
245 put_user(size, iobuf_len))
246 ? -EFAULT : 0;
247 kfree(tmp_buf);
248 return error;
249 default:
250 break;
257 } 251 }
258 252
259 if (compat) { 253 if (compat) {
@@ -282,166 +276,167 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
282 if (get_user(number, &sioc->number)) 276 if (get_user(number, &sioc->number))
283 return -EFAULT; 277 return -EFAULT;
284 } 278 }
285 if (!(dev = try_then_request_module(atm_dev_lookup(number), 279
286 "atm-device-%d", number))) 280 dev = try_then_request_module(atm_dev_lookup(number), "atm-device-%d",
281 number);
282 if (!dev)
287 return -ENODEV; 283 return -ENODEV;
288 284
289 switch (cmd) { 285 switch (cmd) {
290 case ATM_GETTYPE: 286 case ATM_GETTYPE:
291 size = strlen(dev->type) + 1; 287 size = strlen(dev->type) + 1;
292 if (copy_to_user(buf, dev->type, size)) { 288 if (copy_to_user(buf, dev->type, size)) {
293 error = -EFAULT; 289 error = -EFAULT;
294 goto done; 290 goto done;
295 } 291 }
296 break; 292 break;
297 case ATM_GETESI: 293 case ATM_GETESI:
298 size = ESI_LEN; 294 size = ESI_LEN;
299 if (copy_to_user(buf, dev->esi, size)) { 295 if (copy_to_user(buf, dev->esi, size)) {
300 error = -EFAULT; 296 error = -EFAULT;
301 goto done; 297 goto done;
302 } 298 }
303 break; 299 break;
304 case ATM_SETESI: 300 case ATM_SETESI:
305 { 301 {
306 int i; 302 int i;
307 303
308 for (i = 0; i < ESI_LEN; i++) 304 for (i = 0; i < ESI_LEN; i++)
309 if (dev->esi[i]) { 305 if (dev->esi[i]) {
310 error = -EEXIST; 306 error = -EEXIST;
311 goto done;
312 }
313 }
314 /* fall through */
315 case ATM_SETESIF:
316 {
317 unsigned char esi[ESI_LEN];
318
319 if (!capable(CAP_NET_ADMIN)) {
320 error = -EPERM;
321 goto done;
322 }
323 if (copy_from_user(esi, buf, ESI_LEN)) {
324 error = -EFAULT;
325 goto done;
326 }
327 memcpy(dev->esi, esi, ESI_LEN);
328 error = ESI_LEN;
329 goto done;
330 }
331 case ATM_GETSTATZ:
332 if (!capable(CAP_NET_ADMIN)) {
333 error = -EPERM;
334 goto done;
335 }
336 /* fall through */
337 case ATM_GETSTAT:
338 size = sizeof(struct atm_dev_stats);
339 error = fetch_stats(dev, buf, cmd == ATM_GETSTATZ);
340 if (error)
341 goto done;
342 break;
343 case ATM_GETCIRANGE:
344 size = sizeof(struct atm_cirange);
345 if (copy_to_user(buf, &dev->ci_range, size)) {
346 error = -EFAULT;
347 goto done;
348 }
349 break;
350 case ATM_GETLINKRATE:
351 size = sizeof(int);
352 if (copy_to_user(buf, &dev->link_rate, size)) {
353 error = -EFAULT;
354 goto done;
355 }
356 break;
357 case ATM_RSTADDR:
358 if (!capable(CAP_NET_ADMIN)) {
359 error = -EPERM;
360 goto done;
361 }
362 atm_reset_addr(dev, ATM_ADDR_LOCAL);
363 break;
364 case ATM_ADDADDR:
365 case ATM_DELADDR:
366 case ATM_ADDLECSADDR:
367 case ATM_DELLECSADDR:
368 if (!capable(CAP_NET_ADMIN)) {
369 error = -EPERM;
370 goto done;
371 }
372 {
373 struct sockaddr_atmsvc addr;
374
375 if (copy_from_user(&addr, buf, sizeof(addr))) {
376 error = -EFAULT;
377 goto done;
378 }
379 if (cmd == ATM_ADDADDR || cmd == ATM_ADDLECSADDR)
380 error = atm_add_addr(dev, &addr,
381 (cmd == ATM_ADDADDR ?
382 ATM_ADDR_LOCAL : ATM_ADDR_LECS));
383 else
384 error = atm_del_addr(dev, &addr,
385 (cmd == ATM_DELADDR ?
386 ATM_ADDR_LOCAL : ATM_ADDR_LECS));
387 goto done; 307 goto done;
388 } 308 }
389 case ATM_GETADDR: 309 }
390 case ATM_GETLECSADDR: 310 /* fall through */
391 error = atm_get_addr(dev, buf, len, 311 case ATM_SETESIF:
392 (cmd == ATM_GETADDR ? 312 {
313 unsigned char esi[ESI_LEN];
314
315 if (!capable(CAP_NET_ADMIN)) {
316 error = -EPERM;
317 goto done;
318 }
319 if (copy_from_user(esi, buf, ESI_LEN)) {
320 error = -EFAULT;
321 goto done;
322 }
323 memcpy(dev->esi, esi, ESI_LEN);
324 error = ESI_LEN;
325 goto done;
326 }
327 case ATM_GETSTATZ:
328 if (!capable(CAP_NET_ADMIN)) {
329 error = -EPERM;
330 goto done;
331 }
332 /* fall through */
333 case ATM_GETSTAT:
334 size = sizeof(struct atm_dev_stats);
335 error = fetch_stats(dev, buf, cmd == ATM_GETSTATZ);
336 if (error)
337 goto done;
338 break;
339 case ATM_GETCIRANGE:
340 size = sizeof(struct atm_cirange);
341 if (copy_to_user(buf, &dev->ci_range, size)) {
342 error = -EFAULT;
343 goto done;
344 }
345 break;
346 case ATM_GETLINKRATE:
347 size = sizeof(int);
348 if (copy_to_user(buf, &dev->link_rate, size)) {
349 error = -EFAULT;
350 goto done;
351 }
352 break;
353 case ATM_RSTADDR:
354 if (!capable(CAP_NET_ADMIN)) {
355 error = -EPERM;
356 goto done;
357 }
358 atm_reset_addr(dev, ATM_ADDR_LOCAL);
359 break;
360 case ATM_ADDADDR:
361 case ATM_DELADDR:
362 case ATM_ADDLECSADDR:
363 case ATM_DELLECSADDR:
364 {
365 struct sockaddr_atmsvc addr;
366
367 if (!capable(CAP_NET_ADMIN)) {
368 error = -EPERM;
369 goto done;
370 }
371
372 if (copy_from_user(&addr, buf, sizeof(addr))) {
373 error = -EFAULT;
374 goto done;
375 }
376 if (cmd == ATM_ADDADDR || cmd == ATM_ADDLECSADDR)
377 error = atm_add_addr(dev, &addr,
378 (cmd == ATM_ADDADDR ?
393 ATM_ADDR_LOCAL : ATM_ADDR_LECS)); 379 ATM_ADDR_LOCAL : ATM_ADDR_LECS));
394 if (error < 0) 380 else
395 goto done; 381 error = atm_del_addr(dev, &addr,
396 size = error; 382 (cmd == ATM_DELADDR ?
397 /* may return 0, but later on size == 0 means "don't 383 ATM_ADDR_LOCAL : ATM_ADDR_LECS));
398 write the length" */ 384 goto done;
399 error = put_user(size, sioc_len) 385 }
400 ? -EFAULT : 0; 386 case ATM_GETADDR:
387 case ATM_GETLECSADDR:
388 error = atm_get_addr(dev, buf, len,
389 (cmd == ATM_GETADDR ?
390 ATM_ADDR_LOCAL : ATM_ADDR_LECS));
391 if (error < 0)
392 goto done;
393 size = error;
394 /* may return 0, but later on size == 0 means "don't
395 write the length" */
396 error = put_user(size, sioc_len) ? -EFAULT : 0;
397 goto done;
398 case ATM_SETLOOP:
399 if (__ATM_LM_XTRMT((int) (unsigned long) buf) &&
400 __ATM_LM_XTLOC((int) (unsigned long) buf) >
401 __ATM_LM_XTRMT((int) (unsigned long) buf)) {
402 error = -EINVAL;
403 goto done;
404 }
405 /* fall through */
406 case ATM_SETCIRANGE:
407 case SONET_GETSTATZ:
408 case SONET_SETDIAG:
409 case SONET_CLRDIAG:
410 case SONET_SETFRAMING:
411 if (!capable(CAP_NET_ADMIN)) {
412 error = -EPERM;
401 goto done; 413 goto done;
402 case ATM_SETLOOP: 414 }
403 if (__ATM_LM_XTRMT((int) (unsigned long) buf) && 415 /* fall through */
404 __ATM_LM_XTLOC((int) (unsigned long) buf) > 416 default:
405 __ATM_LM_XTRMT((int) (unsigned long) buf)) { 417 if (compat) {
418#ifdef CONFIG_COMPAT
419 if (!dev->ops->compat_ioctl) {
406 error = -EINVAL; 420 error = -EINVAL;
407 goto done; 421 goto done;
408 } 422 }
409 /* fall through */ 423 size = dev->ops->compat_ioctl(dev, cmd, buf);
410 case ATM_SETCIRANGE:
411 case SONET_GETSTATZ:
412 case SONET_SETDIAG:
413 case SONET_CLRDIAG:
414 case SONET_SETFRAMING:
415 if (!capable(CAP_NET_ADMIN)) {
416 error = -EPERM;
417 goto done;
418 }
419 /* fall through */
420 default:
421 if (compat) {
422#ifdef CONFIG_COMPAT
423 if (!dev->ops->compat_ioctl) {
424 error = -EINVAL;
425 goto done;
426 }
427 size = dev->ops->compat_ioctl(dev, cmd, buf);
428#endif 424#endif
429 } else { 425 } else {
430 if (!dev->ops->ioctl) { 426 if (!dev->ops->ioctl) {
431 error = -EINVAL; 427 error = -EINVAL;
432 goto done;
433 }
434 size = dev->ops->ioctl(dev, cmd, buf);
435 }
436 if (size < 0) {
437 error = (size == -ENOIOCTLCMD ? -EINVAL : size);
438 goto done; 428 goto done;
439 } 429 }
430 size = dev->ops->ioctl(dev, cmd, buf);
431 }
432 if (size < 0) {
433 error = (size == -ENOIOCTLCMD ? -EINVAL : size);
434 goto done;
435 }
440 } 436 }
441 437
442 if (size) 438 if (size)
443 error = put_user(size, sioc_len) 439 error = put_user(size, sioc_len) ? -EFAULT : 0;
444 ? -EFAULT : 0;
445 else 440 else
446 error = 0; 441 error = 0;
447done: 442done:
@@ -449,21 +444,10 @@ done:
449 return error; 444 return error;
450} 445}
451 446
452static __inline__ void *dev_get_idx(loff_t left)
453{
454 struct list_head *p;
455
456 list_for_each(p, &atm_devs) {
457 if (!--left)
458 break;
459 }
460 return (p != &atm_devs) ? p : NULL;
461}
462
463void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos) 447void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos)
464{ 448{
465 mutex_lock(&atm_dev_mutex); 449 mutex_lock(&atm_dev_mutex);
466 return *pos ? dev_get_idx(*pos) : SEQ_START_TOKEN; 450 return seq_list_start_head(&atm_devs, *pos);
467} 451}
468 452
469void atm_dev_seq_stop(struct seq_file *seq, void *v) 453void atm_dev_seq_stop(struct seq_file *seq, void *v)
@@ -473,13 +457,5 @@ void atm_dev_seq_stop(struct seq_file *seq, void *v)
473 457
474void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) 458void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
475{ 459{
476 ++*pos; 460 return seq_list_next(v, &atm_devs, pos);
477 v = (v == SEQ_START_TOKEN)
478 ? atm_devs.next : ((struct list_head *)v)->next;
479 return (v == &atm_devs) ? NULL : v;
480} 461}
481
482
483EXPORT_SYMBOL(atm_dev_register);
484EXPORT_SYMBOL(atm_dev_deregister);
485EXPORT_SYMBOL(atm_dev_lookup);
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 22992140052..ad1d28ae512 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -2,6 +2,7 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
5 6
6#include <linux/errno.h> /* error codes */ 7#include <linux/errno.h> /* error codes */
7#include <linux/kernel.h> /* printk */ 8#include <linux/kernel.h> /* printk */
@@ -17,7 +18,6 @@
17#include "resources.h" 18#include "resources.h"
18#include "signaling.h" 19#include "signaling.h"
19 20
20
21#undef WAIT_FOR_DEMON /* #define this if system calls on SVC sockets 21#undef WAIT_FOR_DEMON /* #define this if system calls on SVC sockets
22 should block until the demon runs. 22 should block until the demon runs.
23 Danger: may cause nasty hangs if the demon 23 Danger: may cause nasty hangs if the demon
@@ -28,60 +28,59 @@ struct atm_vcc *sigd = NULL;
28static DECLARE_WAIT_QUEUE_HEAD(sigd_sleep); 28static DECLARE_WAIT_QUEUE_HEAD(sigd_sleep);
29#endif 29#endif
30 30
31
32static void sigd_put_skb(struct sk_buff *skb) 31static void sigd_put_skb(struct sk_buff *skb)
33{ 32{
34#ifdef WAIT_FOR_DEMON 33#ifdef WAIT_FOR_DEMON
35 DECLARE_WAITQUEUE(wait,current); 34 DECLARE_WAITQUEUE(wait, current);
36 35
37 add_wait_queue(&sigd_sleep,&wait); 36 add_wait_queue(&sigd_sleep, &wait);
38 while (!sigd) { 37 while (!sigd) {
39 set_current_state(TASK_UNINTERRUPTIBLE); 38 set_current_state(TASK_UNINTERRUPTIBLE);
40 pr_debug("atmsvc: waiting for signaling demon...\n"); 39 pr_debug("atmsvc: waiting for signaling daemon...\n");
41 schedule(); 40 schedule();
42 } 41 }
43 current->state = TASK_RUNNING; 42 current->state = TASK_RUNNING;
44 remove_wait_queue(&sigd_sleep,&wait); 43 remove_wait_queue(&sigd_sleep, &wait);
45#else 44#else
46 if (!sigd) { 45 if (!sigd) {
47 pr_debug("atmsvc: no signaling demon\n"); 46 pr_debug("atmsvc: no signaling daemon\n");
48 kfree_skb(skb); 47 kfree_skb(skb);
49 return; 48 return;
50 } 49 }
51#endif 50#endif
52 atm_force_charge(sigd,skb->truesize); 51 atm_force_charge(sigd, skb->truesize);
53 skb_queue_tail(&sk_atm(sigd)->sk_receive_queue,skb); 52 skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb);
54 sk_atm(sigd)->sk_data_ready(sk_atm(sigd), skb->len); 53 sk_atm(sigd)->sk_data_ready(sk_atm(sigd), skb->len);
55} 54}
56 55
57 56static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg)
58static void modify_qos(struct atm_vcc *vcc,struct atmsvc_msg *msg)
59{ 57{
60 struct sk_buff *skb; 58 struct sk_buff *skb;
61 59
62 if (test_bit(ATM_VF_RELEASED,&vcc->flags) || 60 if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
63 !test_bit(ATM_VF_READY,&vcc->flags)) 61 !test_bit(ATM_VF_READY, &vcc->flags))
64 return; 62 return;
65 msg->type = as_error; 63 msg->type = as_error;
66 if (!vcc->dev->ops->change_qos) msg->reply = -EOPNOTSUPP; 64 if (!vcc->dev->ops->change_qos)
65 msg->reply = -EOPNOTSUPP;
67 else { 66 else {
68 /* should lock VCC */ 67 /* should lock VCC */
69 msg->reply = vcc->dev->ops->change_qos(vcc,&msg->qos, 68 msg->reply = vcc->dev->ops->change_qos(vcc, &msg->qos,
70 msg->reply); 69 msg->reply);
71 if (!msg->reply) msg->type = as_okay; 70 if (!msg->reply)
71 msg->type = as_okay;
72 } 72 }
73 /* 73 /*
74 * Should probably just turn around the old skb. But the, the buffer 74 * Should probably just turn around the old skb. But the, the buffer
75 * space accounting needs to follow the change too. Maybe later. 75 * space accounting needs to follow the change too. Maybe later.
76 */ 76 */
77 while (!(skb = alloc_skb(sizeof(struct atmsvc_msg),GFP_KERNEL))) 77 while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
78 schedule(); 78 schedule();
79 *(struct atmsvc_msg *) skb_put(skb,sizeof(struct atmsvc_msg)) = *msg; 79 *(struct atmsvc_msg *)skb_put(skb, sizeof(struct atmsvc_msg)) = *msg;
80 sigd_put_skb(skb); 80 sigd_put_skb(skb);
81} 81}
82 82
83 83static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
84static int sigd_send(struct atm_vcc *vcc,struct sk_buff *skb)
85{ 84{
86 struct atmsvc_msg *msg; 85 struct atmsvc_msg *msg;
87 struct atm_vcc *session_vcc; 86 struct atm_vcc *session_vcc;
@@ -90,69 +89,68 @@ static int sigd_send(struct atm_vcc *vcc,struct sk_buff *skb)
90 msg = (struct atmsvc_msg *) skb->data; 89 msg = (struct atmsvc_msg *) skb->data;
91 atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); 90 atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
92 vcc = *(struct atm_vcc **) &msg->vcc; 91 vcc = *(struct atm_vcc **) &msg->vcc;
93 pr_debug("sigd_send %d (0x%lx)\n",(int) msg->type, 92 pr_debug("%d (0x%lx)\n", (int)msg->type, (unsigned long)vcc);
94 (unsigned long) vcc);
95 sk = sk_atm(vcc); 93 sk = sk_atm(vcc);
96 94
97 switch (msg->type) { 95 switch (msg->type) {
98 case as_okay: 96 case as_okay:
99 sk->sk_err = -msg->reply; 97 sk->sk_err = -msg->reply;
100 clear_bit(ATM_VF_WAITING, &vcc->flags); 98 clear_bit(ATM_VF_WAITING, &vcc->flags);
101 if (!*vcc->local.sas_addr.prv && 99 if (!*vcc->local.sas_addr.prv && !*vcc->local.sas_addr.pub) {
102 !*vcc->local.sas_addr.pub) { 100 vcc->local.sas_family = AF_ATMSVC;
103 vcc->local.sas_family = AF_ATMSVC; 101 memcpy(vcc->local.sas_addr.prv,
104 memcpy(vcc->local.sas_addr.prv, 102 msg->local.sas_addr.prv, ATM_ESA_LEN);
105 msg->local.sas_addr.prv,ATM_ESA_LEN); 103 memcpy(vcc->local.sas_addr.pub,
106 memcpy(vcc->local.sas_addr.pub, 104 msg->local.sas_addr.pub, ATM_E164_LEN + 1);
107 msg->local.sas_addr.pub,ATM_E164_LEN+1); 105 }
108 } 106 session_vcc = vcc->session ? vcc->session : vcc;
109 session_vcc = vcc->session ? vcc->session : vcc; 107 if (session_vcc->vpi || session_vcc->vci)
110 if (session_vcc->vpi || session_vcc->vci) break;
111 session_vcc->itf = msg->pvc.sap_addr.itf;
112 session_vcc->vpi = msg->pvc.sap_addr.vpi;
113 session_vcc->vci = msg->pvc.sap_addr.vci;
114 if (session_vcc->vpi || session_vcc->vci)
115 session_vcc->qos = msg->qos;
116 break;
117 case as_error:
118 clear_bit(ATM_VF_REGIS,&vcc->flags);
119 clear_bit(ATM_VF_READY,&vcc->flags);
120 sk->sk_err = -msg->reply;
121 clear_bit(ATM_VF_WAITING, &vcc->flags);
122 break; 108 break;
123 case as_indicate: 109 session_vcc->itf = msg->pvc.sap_addr.itf;
124 vcc = *(struct atm_vcc **) &msg->listen_vcc; 110 session_vcc->vpi = msg->pvc.sap_addr.vpi;
125 sk = sk_atm(vcc); 111 session_vcc->vci = msg->pvc.sap_addr.vci;
126 pr_debug("as_indicate!!!\n"); 112 if (session_vcc->vpi || session_vcc->vci)
127 lock_sock(sk); 113 session_vcc->qos = msg->qos;
128 if (sk_acceptq_is_full(sk)) { 114 break;
129 sigd_enq(NULL,as_reject,vcc,NULL,NULL); 115 case as_error:
130 dev_kfree_skb(skb); 116 clear_bit(ATM_VF_REGIS, &vcc->flags);
131 goto as_indicate_complete; 117 clear_bit(ATM_VF_READY, &vcc->flags);
132 } 118 sk->sk_err = -msg->reply;
133 sk->sk_ack_backlog++; 119 clear_bit(ATM_VF_WAITING, &vcc->flags);
134 skb_queue_tail(&sk->sk_receive_queue, skb); 120 break;
135 pr_debug("waking sk->sk_sleep 0x%p\n", sk->sk_sleep); 121 case as_indicate:
136 sk->sk_state_change(sk); 122 vcc = *(struct atm_vcc **)&msg->listen_vcc;
123 sk = sk_atm(vcc);
124 pr_debug("as_indicate!!!\n");
125 lock_sock(sk);
126 if (sk_acceptq_is_full(sk)) {
127 sigd_enq(NULL, as_reject, vcc, NULL, NULL);
128 dev_kfree_skb(skb);
129 goto as_indicate_complete;
130 }
131 sk->sk_ack_backlog++;
132 skb_queue_tail(&sk->sk_receive_queue, skb);
133 pr_debug("waking sk->sk_sleep 0x%p\n", sk->sk_sleep);
134 sk->sk_state_change(sk);
137as_indicate_complete: 135as_indicate_complete:
138 release_sock(sk); 136 release_sock(sk);
139 return 0; 137 return 0;
140 case as_close: 138 case as_close:
141 set_bit(ATM_VF_RELEASED,&vcc->flags); 139 set_bit(ATM_VF_RELEASED, &vcc->flags);
142 vcc_release_async(vcc, msg->reply); 140 vcc_release_async(vcc, msg->reply);
143 goto out; 141 goto out;
144 case as_modify: 142 case as_modify:
145 modify_qos(vcc,msg); 143 modify_qos(vcc, msg);
146 break; 144 break;
147 case as_addparty: 145 case as_addparty:
148 case as_dropparty: 146 case as_dropparty:
149 sk->sk_err_soft = msg->reply; /* < 0 failure, otherwise ep_ref */ 147 sk->sk_err_soft = msg->reply;
150 clear_bit(ATM_VF_WAITING, &vcc->flags); 148 /* < 0 failure, otherwise ep_ref */
151 break; 149 clear_bit(ATM_VF_WAITING, &vcc->flags);
152 default: 150 break;
153 printk(KERN_ALERT "sigd_send: bad message type %d\n", 151 default:
154 (int) msg->type); 152 pr_alert("bad message type %d\n", (int)msg->type);
155 return -EINVAL; 153 return -EINVAL;
156 } 154 }
157 sk->sk_state_change(sk); 155 sk->sk_state_change(sk);
158out: 156out:
@@ -160,48 +158,52 @@ out:
160 return 0; 158 return 0;
161} 159}
162 160
163 161void sigd_enq2(struct atm_vcc *vcc, enum atmsvc_msg_type type,
164void sigd_enq2(struct atm_vcc *vcc,enum atmsvc_msg_type type, 162 struct atm_vcc *listen_vcc, const struct sockaddr_atmpvc *pvc,
165 struct atm_vcc *listen_vcc,const struct sockaddr_atmpvc *pvc, 163 const struct sockaddr_atmsvc *svc, const struct atm_qos *qos,
166 const struct sockaddr_atmsvc *svc,const struct atm_qos *qos,int reply) 164 int reply)
167{ 165{
168 struct sk_buff *skb; 166 struct sk_buff *skb;
169 struct atmsvc_msg *msg; 167 struct atmsvc_msg *msg;
170 static unsigned session = 0; 168 static unsigned session = 0;
171 169
172 pr_debug("sigd_enq %d (0x%p)\n",(int) type,vcc); 170 pr_debug("%d (0x%p)\n", (int)type, vcc);
173 while (!(skb = alloc_skb(sizeof(struct atmsvc_msg),GFP_KERNEL))) 171 while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
174 schedule(); 172 schedule();
175 msg = (struct atmsvc_msg *) skb_put(skb,sizeof(struct atmsvc_msg)); 173 msg = (struct atmsvc_msg *)skb_put(skb, sizeof(struct atmsvc_msg));
176 memset(msg,0,sizeof(*msg)); 174 memset(msg, 0, sizeof(*msg));
177 msg->type = type; 175 msg->type = type;
178 *(struct atm_vcc **) &msg->vcc = vcc; 176 *(struct atm_vcc **) &msg->vcc = vcc;
179 *(struct atm_vcc **) &msg->listen_vcc = listen_vcc; 177 *(struct atm_vcc **) &msg->listen_vcc = listen_vcc;
180 msg->reply = reply; 178 msg->reply = reply;
181 if (qos) msg->qos = *qos; 179 if (qos)
182 if (vcc) msg->sap = vcc->sap; 180 msg->qos = *qos;
183 if (svc) msg->svc = *svc; 181 if (vcc)
184 if (vcc) msg->local = vcc->local; 182 msg->sap = vcc->sap;
185 if (pvc) msg->pvc = *pvc; 183 if (svc)
184 msg->svc = *svc;
185 if (vcc)
186 msg->local = vcc->local;
187 if (pvc)
188 msg->pvc = *pvc;
186 if (vcc) { 189 if (vcc) {
187 if (type == as_connect && test_bit(ATM_VF_SESSION, &vcc->flags)) 190 if (type == as_connect && test_bit(ATM_VF_SESSION, &vcc->flags))
188 msg->session = ++session; 191 msg->session = ++session;
189 /* every new pmp connect gets the next session number */ 192 /* every new pmp connect gets the next session number */
190 } 193 }
191 sigd_put_skb(skb); 194 sigd_put_skb(skb);
192 if (vcc) set_bit(ATM_VF_REGIS,&vcc->flags); 195 if (vcc)
196 set_bit(ATM_VF_REGIS, &vcc->flags);
193} 197}
194 198
195 199void sigd_enq(struct atm_vcc *vcc, enum atmsvc_msg_type type,
196void sigd_enq(struct atm_vcc *vcc,enum atmsvc_msg_type type, 200 struct atm_vcc *listen_vcc, const struct sockaddr_atmpvc *pvc,
197 struct atm_vcc *listen_vcc,const struct sockaddr_atmpvc *pvc, 201 const struct sockaddr_atmsvc *svc)
198 const struct sockaddr_atmsvc *svc)
199{ 202{
200 sigd_enq2(vcc,type,listen_vcc,pvc,svc,vcc ? &vcc->qos : NULL,0); 203 sigd_enq2(vcc, type, listen_vcc, pvc, svc, vcc ? &vcc->qos : NULL, 0);
201 /* other ISP applications may use "reply" */ 204 /* other ISP applications may use "reply" */
202} 205}
203 206
204
205static void purge_vcc(struct atm_vcc *vcc) 207static void purge_vcc(struct atm_vcc *vcc)
206{ 208{
207 if (sk_atm(vcc)->sk_family == PF_ATMSVC && 209 if (sk_atm(vcc)->sk_family == PF_ATMSVC &&
@@ -212,21 +214,20 @@ static void purge_vcc(struct atm_vcc *vcc)
212 } 214 }
213} 215}
214 216
215
216static void sigd_close(struct atm_vcc *vcc) 217static void sigd_close(struct atm_vcc *vcc)
217{ 218{
218 struct hlist_node *node; 219 struct hlist_node *node;
219 struct sock *s; 220 struct sock *s;
220 int i; 221 int i;
221 222
222 pr_debug("sigd_close\n"); 223 pr_debug("\n");
223 sigd = NULL; 224 sigd = NULL;
224 if (skb_peek(&sk_atm(vcc)->sk_receive_queue)) 225 if (skb_peek(&sk_atm(vcc)->sk_receive_queue))
225 printk(KERN_ERR "sigd_close: closing with requests pending\n"); 226 pr_err("closing with requests pending\n");
226 skb_queue_purge(&sk_atm(vcc)->sk_receive_queue); 227 skb_queue_purge(&sk_atm(vcc)->sk_receive_queue);
227 228
228 read_lock(&vcc_sklist_lock); 229 read_lock(&vcc_sklist_lock);
229 for(i = 0; i < VCC_HTABLE_SIZE; ++i) { 230 for (i = 0; i < VCC_HTABLE_SIZE; ++i) {
230 struct hlist_head *head = &vcc_hash[i]; 231 struct hlist_head *head = &vcc_hash[i];
231 232
232 sk_for_each(s, node, head) { 233 sk_for_each(s, node, head) {
@@ -238,13 +239,11 @@ static void sigd_close(struct atm_vcc *vcc)
238 read_unlock(&vcc_sklist_lock); 239 read_unlock(&vcc_sklist_lock);
239} 240}
240 241
241
242static struct atmdev_ops sigd_dev_ops = { 242static struct atmdev_ops sigd_dev_ops = {
243 .close = sigd_close, 243 .close = sigd_close,
244 .send = sigd_send 244 .send = sigd_send
245}; 245};
246 246
247
248static struct atm_dev sigd_dev = { 247static struct atm_dev sigd_dev = {
249 .ops = &sigd_dev_ops, 248 .ops = &sigd_dev_ops,
250 .type = "sig", 249 .type = "sig",
@@ -252,16 +251,16 @@ static struct atm_dev sigd_dev = {
252 .lock = __SPIN_LOCK_UNLOCKED(sigd_dev.lock) 251 .lock = __SPIN_LOCK_UNLOCKED(sigd_dev.lock)
253}; 252};
254 253
255
256int sigd_attach(struct atm_vcc *vcc) 254int sigd_attach(struct atm_vcc *vcc)
257{ 255{
258 if (sigd) return -EADDRINUSE; 256 if (sigd)
259 pr_debug("sigd_attach\n"); 257 return -EADDRINUSE;
258 pr_debug("\n");
260 sigd = vcc; 259 sigd = vcc;
261 vcc->dev = &sigd_dev; 260 vcc->dev = &sigd_dev;
262 vcc_insert_socket(sk_atm(vcc)); 261 vcc_insert_socket(sk_atm(vcc));
263 set_bit(ATM_VF_META,&vcc->flags); 262 set_bit(ATM_VF_META, &vcc->flags);
264 set_bit(ATM_VF_READY,&vcc->flags); 263 set_bit(ATM_VF_READY, &vcc->flags);
265#ifdef WAIT_FOR_DEMON 264#ifdef WAIT_FOR_DEMON
266 wake_up(&sigd_sleep); 265 wake_up(&sigd_sleep);
267#endif 266#endif
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 66e1d9b3e5d..3ba9a45a51a 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -2,6 +2,7 @@
2 2
3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ 3/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
4 4
5#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
5 6
6#include <linux/string.h> 7#include <linux/string.h>
7#include <linux/net.h> /* struct socket, struct proto_ops */ 8#include <linux/net.h> /* struct socket, struct proto_ops */
@@ -18,14 +19,15 @@
18#include <linux/atmdev.h> 19#include <linux/atmdev.h>
19#include <linux/bitops.h> 20#include <linux/bitops.h>
20#include <net/sock.h> /* for sock_no_* */ 21#include <net/sock.h> /* for sock_no_* */
21#include <asm/uaccess.h> 22#include <linux/uaccess.h>
22 23
23#include "resources.h" 24#include "resources.h"
24#include "common.h" /* common for PVCs and SVCs */ 25#include "common.h" /* common for PVCs and SVCs */
25#include "signaling.h" 26#include "signaling.h"
26#include "addr.h" 27#include "addr.h"
27 28
28static int svc_create(struct net *net, struct socket *sock, int protocol, int kern); 29static int svc_create(struct net *net, struct socket *sock, int protocol,
30 int kern);
29 31
30/* 32/*
31 * Note: since all this is still nicely synchronized with the signaling demon, 33 * Note: since all this is still nicely synchronized with the signaling demon,
@@ -34,25 +36,25 @@ static int svc_create(struct net *net, struct socket *sock, int protocol, int ke
34 */ 36 */
35 37
36 38
37static int svc_shutdown(struct socket *sock,int how) 39static int svc_shutdown(struct socket *sock, int how)
38{ 40{
39 return 0; 41 return 0;
40} 42}
41 43
42
43static void svc_disconnect(struct atm_vcc *vcc) 44static void svc_disconnect(struct atm_vcc *vcc)
44{ 45{
45 DEFINE_WAIT(wait); 46 DEFINE_WAIT(wait);
46 struct sk_buff *skb; 47 struct sk_buff *skb;
47 struct sock *sk = sk_atm(vcc); 48 struct sock *sk = sk_atm(vcc);
48 49
49 pr_debug("svc_disconnect %p\n",vcc); 50 pr_debug("%p\n", vcc);
50 if (test_bit(ATM_VF_REGIS,&vcc->flags)) { 51 if (test_bit(ATM_VF_REGIS, &vcc->flags)) {
51 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 52 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
52 sigd_enq(vcc,as_close,NULL,NULL,NULL); 53 sigd_enq(vcc, as_close, NULL, NULL, NULL);
53 while (!test_bit(ATM_VF_RELEASED,&vcc->flags) && sigd) { 54 while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
54 schedule(); 55 schedule();
55 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 56 prepare_to_wait(sk->sk_sleep, &wait,
57 TASK_UNINTERRUPTIBLE);
56 } 58 }
57 finish_wait(sk->sk_sleep, &wait); 59 finish_wait(sk->sk_sleep, &wait);
58 } 60 }
@@ -61,35 +63,35 @@ static void svc_disconnect(struct atm_vcc *vcc)
61 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { 63 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
62 atm_return(vcc, skb->truesize); 64 atm_return(vcc, skb->truesize);
63 pr_debug("LISTEN REL\n"); 65 pr_debug("LISTEN REL\n");
64 sigd_enq2(NULL,as_reject,vcc,NULL,NULL,&vcc->qos,0); 66 sigd_enq2(NULL, as_reject, vcc, NULL, NULL, &vcc->qos, 0);
65 dev_kfree_skb(skb); 67 dev_kfree_skb(skb);
66 } 68 }
67 clear_bit(ATM_VF_REGIS, &vcc->flags); 69 clear_bit(ATM_VF_REGIS, &vcc->flags);
68 /* ... may retry later */ 70 /* ... may retry later */
69} 71}
70 72
71
72static int svc_release(struct socket *sock) 73static int svc_release(struct socket *sock)
73{ 74{
74 struct sock *sk = sock->sk; 75 struct sock *sk = sock->sk;
75 struct atm_vcc *vcc; 76 struct atm_vcc *vcc;
76 77
77 if (sk) { 78 if (sk) {
78 vcc = ATM_SD(sock); 79 vcc = ATM_SD(sock);
79 pr_debug("svc_release %p\n", vcc); 80 pr_debug("%p\n", vcc);
80 clear_bit(ATM_VF_READY, &vcc->flags); 81 clear_bit(ATM_VF_READY, &vcc->flags);
81 /* VCC pointer is used as a reference, so we must not free it 82 /*
82 (thereby subjecting it to re-use) before all pending connections 83 * VCC pointer is used as a reference,
83 are closed */ 84 * so we must not free it (thereby subjecting it to re-use)
85 * before all pending connections are closed
86 */
84 svc_disconnect(vcc); 87 svc_disconnect(vcc);
85 vcc_release(sock); 88 vcc_release(sock);
86 } 89 }
87 return 0; 90 return 0;
88} 91}
89 92
90 93static int svc_bind(struct socket *sock, struct sockaddr *sockaddr,
91static int svc_bind(struct socket *sock,struct sockaddr *sockaddr, 94 int sockaddr_len)
92 int sockaddr_len)
93{ 95{
94 DEFINE_WAIT(wait); 96 DEFINE_WAIT(wait);
95 struct sock *sk = sock->sk; 97 struct sock *sk = sock->sk;
@@ -114,38 +116,37 @@ static int svc_bind(struct socket *sock,struct sockaddr *sockaddr,
114 error = -EAFNOSUPPORT; 116 error = -EAFNOSUPPORT;
115 goto out; 117 goto out;
116 } 118 }
117 clear_bit(ATM_VF_BOUND,&vcc->flags); 119 clear_bit(ATM_VF_BOUND, &vcc->flags);
118 /* failing rebind will kill old binding */ 120 /* failing rebind will kill old binding */
119 /* @@@ check memory (de)allocation on rebind */ 121 /* @@@ check memory (de)allocation on rebind */
120 if (!test_bit(ATM_VF_HASQOS,&vcc->flags)) { 122 if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) {
121 error = -EBADFD; 123 error = -EBADFD;
122 goto out; 124 goto out;
123 } 125 }
124 vcc->local = *addr; 126 vcc->local = *addr;
125 set_bit(ATM_VF_WAITING, &vcc->flags); 127 set_bit(ATM_VF_WAITING, &vcc->flags);
126 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 128 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
127 sigd_enq(vcc,as_bind,NULL,NULL,&vcc->local); 129 sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local);
128 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 130 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
129 schedule(); 131 schedule();
130 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 132 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
131 } 133 }
132 finish_wait(sk->sk_sleep, &wait); 134 finish_wait(sk->sk_sleep, &wait);
133 clear_bit(ATM_VF_REGIS,&vcc->flags); /* doesn't count */ 135 clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */
134 if (!sigd) { 136 if (!sigd) {
135 error = -EUNATCH; 137 error = -EUNATCH;
136 goto out; 138 goto out;
137 } 139 }
138 if (!sk->sk_err) 140 if (!sk->sk_err)
139 set_bit(ATM_VF_BOUND,&vcc->flags); 141 set_bit(ATM_VF_BOUND, &vcc->flags);
140 error = -sk->sk_err; 142 error = -sk->sk_err;
141out: 143out:
142 release_sock(sk); 144 release_sock(sk);
143 return error; 145 return error;
144} 146}
145 147
146 148static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
147static int svc_connect(struct socket *sock,struct sockaddr *sockaddr, 149 int sockaddr_len, int flags)
148 int sockaddr_len,int flags)
149{ 150{
150 DEFINE_WAIT(wait); 151 DEFINE_WAIT(wait);
151 struct sock *sk = sock->sk; 152 struct sock *sk = sock->sk;
@@ -153,7 +154,7 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
153 struct atm_vcc *vcc = ATM_SD(sock); 154 struct atm_vcc *vcc = ATM_SD(sock);
154 int error; 155 int error;
155 156
156 pr_debug("svc_connect %p\n",vcc); 157 pr_debug("%p\n", vcc);
157 lock_sock(sk); 158 lock_sock(sk);
158 if (sockaddr_len != sizeof(struct sockaddr_atmsvc)) { 159 if (sockaddr_len != sizeof(struct sockaddr_atmsvc)) {
159 error = -EINVAL; 160 error = -EINVAL;
@@ -201,7 +202,7 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
201 vcc->remote = *addr; 202 vcc->remote = *addr;
202 set_bit(ATM_VF_WAITING, &vcc->flags); 203 set_bit(ATM_VF_WAITING, &vcc->flags);
203 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 204 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
204 sigd_enq(vcc,as_connect,NULL,NULL,&vcc->remote); 205 sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote);
205 if (flags & O_NONBLOCK) { 206 if (flags & O_NONBLOCK) {
206 finish_wait(sk->sk_sleep, &wait); 207 finish_wait(sk->sk_sleep, &wait);
207 sock->state = SS_CONNECTING; 208 sock->state = SS_CONNECTING;
@@ -212,7 +213,8 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
212 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 213 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
213 schedule(); 214 schedule();
214 if (!signal_pending(current)) { 215 if (!signal_pending(current)) {
215 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 216 prepare_to_wait(sk->sk_sleep, &wait,
217 TASK_INTERRUPTIBLE);
216 continue; 218 continue;
217 } 219 }
218 pr_debug("*ABORT*\n"); 220 pr_debug("*ABORT*\n");
@@ -228,20 +230,22 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
228 * Kernel <--okay---- Demon 230 * Kernel <--okay---- Demon
229 * Kernel <--close--- Demon 231 * Kernel <--close--- Demon
230 */ 232 */
231 sigd_enq(vcc,as_close,NULL,NULL,NULL); 233 sigd_enq(vcc, as_close, NULL, NULL, NULL);
232 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 234 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
233 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 235 prepare_to_wait(sk->sk_sleep, &wait,
236 TASK_INTERRUPTIBLE);
234 schedule(); 237 schedule();
235 } 238 }
236 if (!sk->sk_err) 239 if (!sk->sk_err)
237 while (!test_bit(ATM_VF_RELEASED,&vcc->flags) 240 while (!test_bit(ATM_VF_RELEASED, &vcc->flags) &&
238 && sigd) { 241 sigd) {
239 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 242 prepare_to_wait(sk->sk_sleep, &wait,
243 TASK_INTERRUPTIBLE);
240 schedule(); 244 schedule();
241 } 245 }
242 clear_bit(ATM_VF_REGIS,&vcc->flags); 246 clear_bit(ATM_VF_REGIS, &vcc->flags);
243 clear_bit(ATM_VF_RELEASED,&vcc->flags); 247 clear_bit(ATM_VF_RELEASED, &vcc->flags);
244 clear_bit(ATM_VF_CLOSE,&vcc->flags); 248 clear_bit(ATM_VF_CLOSE, &vcc->flags);
245 /* we're gone now but may connect later */ 249 /* we're gone now but may connect later */
246 error = -EINTR; 250 error = -EINTR;
247 break; 251 break;
@@ -269,37 +273,37 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
269/* 273/*
270 * #endif 274 * #endif
271 */ 275 */
272 if (!(error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci))) 276 error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci);
277 if (!error)
273 sock->state = SS_CONNECTED; 278 sock->state = SS_CONNECTED;
274 else 279 else
275 (void) svc_disconnect(vcc); 280 (void)svc_disconnect(vcc);
276out: 281out:
277 release_sock(sk); 282 release_sock(sk);
278 return error; 283 return error;
279} 284}
280 285
281 286static int svc_listen(struct socket *sock, int backlog)
282static int svc_listen(struct socket *sock,int backlog)
283{ 287{
284 DEFINE_WAIT(wait); 288 DEFINE_WAIT(wait);
285 struct sock *sk = sock->sk; 289 struct sock *sk = sock->sk;
286 struct atm_vcc *vcc = ATM_SD(sock); 290 struct atm_vcc *vcc = ATM_SD(sock);
287 int error; 291 int error;
288 292
289 pr_debug("svc_listen %p\n",vcc); 293 pr_debug("%p\n", vcc);
290 lock_sock(sk); 294 lock_sock(sk);
291 /* let server handle listen on unbound sockets */ 295 /* let server handle listen on unbound sockets */
292 if (test_bit(ATM_VF_SESSION,&vcc->flags)) { 296 if (test_bit(ATM_VF_SESSION, &vcc->flags)) {
293 error = -EINVAL; 297 error = -EINVAL;
294 goto out; 298 goto out;
295 } 299 }
296 if (test_bit(ATM_VF_LISTEN, &vcc->flags)) { 300 if (test_bit(ATM_VF_LISTEN, &vcc->flags)) {
297 error = -EADDRINUSE; 301 error = -EADDRINUSE;
298 goto out; 302 goto out;
299 } 303 }
300 set_bit(ATM_VF_WAITING, &vcc->flags); 304 set_bit(ATM_VF_WAITING, &vcc->flags);
301 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 305 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
302 sigd_enq(vcc,as_listen,NULL,NULL,&vcc->local); 306 sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local);
303 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 307 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
304 schedule(); 308 schedule();
305 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 309 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
@@ -309,7 +313,7 @@ static int svc_listen(struct socket *sock,int backlog)
309 error = -EUNATCH; 313 error = -EUNATCH;
310 goto out; 314 goto out;
311 } 315 }
312 set_bit(ATM_VF_LISTEN,&vcc->flags); 316 set_bit(ATM_VF_LISTEN, &vcc->flags);
313 vcc_insert_socket(sk); 317 vcc_insert_socket(sk);
314 sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT; 318 sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
315 error = -sk->sk_err; 319 error = -sk->sk_err;
@@ -318,8 +322,7 @@ out:
318 return error; 322 return error;
319} 323}
320 324
321 325static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
322static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
323{ 326{
324 struct sock *sk = sock->sk; 327 struct sock *sk = sock->sk;
325 struct sk_buff *skb; 328 struct sk_buff *skb;
@@ -336,15 +339,16 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
336 339
337 new_vcc = ATM_SD(newsock); 340 new_vcc = ATM_SD(newsock);
338 341
339 pr_debug("svc_accept %p -> %p\n",old_vcc,new_vcc); 342 pr_debug("%p -> %p\n", old_vcc, new_vcc);
340 while (1) { 343 while (1) {
341 DEFINE_WAIT(wait); 344 DEFINE_WAIT(wait);
342 345
343 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 346 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
344 while (!(skb = skb_dequeue(&sk->sk_receive_queue)) && 347 while (!(skb = skb_dequeue(&sk->sk_receive_queue)) &&
345 sigd) { 348 sigd) {
346 if (test_bit(ATM_VF_RELEASED,&old_vcc->flags)) break; 349 if (test_bit(ATM_VF_RELEASED, &old_vcc->flags))
347 if (test_bit(ATM_VF_CLOSE,&old_vcc->flags)) { 350 break;
351 if (test_bit(ATM_VF_CLOSE, &old_vcc->flags)) {
348 error = -sk->sk_err; 352 error = -sk->sk_err;
349 break; 353 break;
350 } 354 }
@@ -359,7 +363,8 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
359 error = -ERESTARTSYS; 363 error = -ERESTARTSYS;
360 break; 364 break;
361 } 365 }
362 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 366 prepare_to_wait(sk->sk_sleep, &wait,
367 TASK_INTERRUPTIBLE);
363 } 368 }
364 finish_wait(sk->sk_sleep, &wait); 369 finish_wait(sk->sk_sleep, &wait);
365 if (error) 370 if (error)
@@ -368,31 +373,34 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
368 error = -EUNATCH; 373 error = -EUNATCH;
369 goto out; 374 goto out;
370 } 375 }
371 msg = (struct atmsvc_msg *) skb->data; 376 msg = (struct atmsvc_msg *)skb->data;
372 new_vcc->qos = msg->qos; 377 new_vcc->qos = msg->qos;
373 set_bit(ATM_VF_HASQOS,&new_vcc->flags); 378 set_bit(ATM_VF_HASQOS, &new_vcc->flags);
374 new_vcc->remote = msg->svc; 379 new_vcc->remote = msg->svc;
375 new_vcc->local = msg->local; 380 new_vcc->local = msg->local;
376 new_vcc->sap = msg->sap; 381 new_vcc->sap = msg->sap;
377 error = vcc_connect(newsock, msg->pvc.sap_addr.itf, 382 error = vcc_connect(newsock, msg->pvc.sap_addr.itf,
378 msg->pvc.sap_addr.vpi, msg->pvc.sap_addr.vci); 383 msg->pvc.sap_addr.vpi,
384 msg->pvc.sap_addr.vci);
379 dev_kfree_skb(skb); 385 dev_kfree_skb(skb);
380 sk->sk_ack_backlog--; 386 sk->sk_ack_backlog--;
381 if (error) { 387 if (error) {
382 sigd_enq2(NULL,as_reject,old_vcc,NULL,NULL, 388 sigd_enq2(NULL, as_reject, old_vcc, NULL, NULL,
383 &old_vcc->qos,error); 389 &old_vcc->qos, error);
384 error = error == -EAGAIN ? -EBUSY : error; 390 error = error == -EAGAIN ? -EBUSY : error;
385 goto out; 391 goto out;
386 } 392 }
387 /* wait should be short, so we ignore the non-blocking flag */ 393 /* wait should be short, so we ignore the non-blocking flag */
388 set_bit(ATM_VF_WAITING, &new_vcc->flags); 394 set_bit(ATM_VF_WAITING, &new_vcc->flags);
389 prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 395 prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait,
390 sigd_enq(new_vcc,as_accept,old_vcc,NULL,NULL); 396 TASK_UNINTERRUPTIBLE);
397 sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL);
391 while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) { 398 while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) {
392 release_sock(sk); 399 release_sock(sk);
393 schedule(); 400 schedule();
394 lock_sock(sk); 401 lock_sock(sk);
395 prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 402 prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait,
403 TASK_UNINTERRUPTIBLE);
396 } 404 }
397 finish_wait(sk_atm(new_vcc)->sk_sleep, &wait); 405 finish_wait(sk_atm(new_vcc)->sk_sleep, &wait);
398 if (!sigd) { 406 if (!sigd) {
@@ -412,39 +420,37 @@ out:
412 return error; 420 return error;
413} 421}
414 422
415 423static int svc_getname(struct socket *sock, struct sockaddr *sockaddr,
416static int svc_getname(struct socket *sock,struct sockaddr *sockaddr, 424 int *sockaddr_len, int peer)
417 int *sockaddr_len,int peer)
418{ 425{
419 struct sockaddr_atmsvc *addr; 426 struct sockaddr_atmsvc *addr;
420 427
421 *sockaddr_len = sizeof(struct sockaddr_atmsvc); 428 *sockaddr_len = sizeof(struct sockaddr_atmsvc);
422 addr = (struct sockaddr_atmsvc *) sockaddr; 429 addr = (struct sockaddr_atmsvc *) sockaddr;
423 memcpy(addr,peer ? &ATM_SD(sock)->remote : &ATM_SD(sock)->local, 430 memcpy(addr, peer ? &ATM_SD(sock)->remote : &ATM_SD(sock)->local,
424 sizeof(struct sockaddr_atmsvc)); 431 sizeof(struct sockaddr_atmsvc));
425 return 0; 432 return 0;
426} 433}
427 434
428 435int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
429int svc_change_qos(struct atm_vcc *vcc,struct atm_qos *qos)
430{ 436{
431 struct sock *sk = sk_atm(vcc); 437 struct sock *sk = sk_atm(vcc);
432 DEFINE_WAIT(wait); 438 DEFINE_WAIT(wait);
433 439
434 set_bit(ATM_VF_WAITING, &vcc->flags); 440 set_bit(ATM_VF_WAITING, &vcc->flags);
435 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 441 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
436 sigd_enq2(vcc,as_modify,NULL,NULL,&vcc->local,qos,0); 442 sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0);
437 while (test_bit(ATM_VF_WAITING, &vcc->flags) && 443 while (test_bit(ATM_VF_WAITING, &vcc->flags) &&
438 !test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) { 444 !test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
439 schedule(); 445 schedule();
440 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 446 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
441 } 447 }
442 finish_wait(sk->sk_sleep, &wait); 448 finish_wait(sk->sk_sleep, &wait);
443 if (!sigd) return -EUNATCH; 449 if (!sigd)
450 return -EUNATCH;
444 return -sk->sk_err; 451 return -sk->sk_err;
445} 452}
446 453
447
448static int svc_setsockopt(struct socket *sock, int level, int optname, 454static int svc_setsockopt(struct socket *sock, int level, int optname,
449 char __user *optval, unsigned int optlen) 455 char __user *optval, unsigned int optlen)
450{ 456{
@@ -454,37 +460,35 @@ static int svc_setsockopt(struct socket *sock, int level, int optname,
454 460
455 lock_sock(sk); 461 lock_sock(sk);
456 switch (optname) { 462 switch (optname) {
457 case SO_ATMSAP: 463 case SO_ATMSAP:
458 if (level != SOL_ATM || optlen != sizeof(struct atm_sap)) { 464 if (level != SOL_ATM || optlen != sizeof(struct atm_sap)) {
459 error = -EINVAL; 465 error = -EINVAL;
460 goto out; 466 goto out;
461 } 467 }
462 if (copy_from_user(&vcc->sap, optval, optlen)) { 468 if (copy_from_user(&vcc->sap, optval, optlen)) {
463 error = -EFAULT; 469 error = -EFAULT;
464 goto out; 470 goto out;
465 } 471 }
466 set_bit(ATM_VF_HASSAP, &vcc->flags); 472 set_bit(ATM_VF_HASSAP, &vcc->flags);
467 break; 473 break;
468 case SO_MULTIPOINT: 474 case SO_MULTIPOINT:
469 if (level != SOL_ATM || optlen != sizeof(int)) { 475 if (level != SOL_ATM || optlen != sizeof(int)) {
470 error = -EINVAL; 476 error = -EINVAL;
471 goto out; 477 goto out;
472 } 478 }
473 if (get_user(value, (int __user *) optval)) { 479 if (get_user(value, (int __user *)optval)) {
474 error = -EFAULT; 480 error = -EFAULT;
475 goto out; 481 goto out;
476 } 482 }
477 if (value == 1) { 483 if (value == 1)
478 set_bit(ATM_VF_SESSION, &vcc->flags); 484 set_bit(ATM_VF_SESSION, &vcc->flags);
479 } else if (value == 0) { 485 else if (value == 0)
480 clear_bit(ATM_VF_SESSION, &vcc->flags); 486 clear_bit(ATM_VF_SESSION, &vcc->flags);
481 } else { 487 else
482 error = -EINVAL; 488 error = -EINVAL;
483 } 489 break;
484 break; 490 default:
485 default: 491 error = vcc_setsockopt(sock, level, optname, optval, optlen);
486 error = vcc_setsockopt(sock, level, optname,
487 optval, optlen);
488 } 492 }
489 493
490out: 494out:
@@ -492,9 +496,8 @@ out:
492 return error; 496 return error;
493} 497}
494 498
495 499static int svc_getsockopt(struct socket *sock, int level, int optname,
496static int svc_getsockopt(struct socket *sock,int level,int optname, 500 char __user *optval, int __user *optlen)
497 char __user *optval,int __user *optlen)
498{ 501{
499 struct sock *sk = sock->sk; 502 struct sock *sk = sock->sk;
500 int error = 0, len; 503 int error = 0, len;
@@ -521,7 +524,6 @@ out:
521 return error; 524 return error;
522} 525}
523 526
524
525static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr, 527static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
526 int sockaddr_len, int flags) 528 int sockaddr_len, int flags)
527{ 529{
@@ -540,7 +542,7 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
540 error = -EINPROGRESS; 542 error = -EINPROGRESS;
541 goto out; 543 goto out;
542 } 544 }
543 pr_debug("svc_addparty added wait queue\n"); 545 pr_debug("added wait queue\n");
544 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 546 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
545 schedule(); 547 schedule();
546 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 548 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
@@ -552,7 +554,6 @@ out:
552 return error; 554 return error;
553} 555}
554 556
555
556static int svc_dropparty(struct socket *sock, int ep_ref) 557static int svc_dropparty(struct socket *sock, int ep_ref)
557{ 558{
558 DEFINE_WAIT(wait); 559 DEFINE_WAIT(wait);
@@ -579,7 +580,6 @@ out:
579 return error; 580 return error;
580} 581}
581 582
582
583static int svc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 583static int svc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
584{ 584{
585 int error, ep_ref; 585 int error, ep_ref;
@@ -587,29 +587,31 @@ static int svc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
587 struct atm_vcc *vcc = ATM_SD(sock); 587 struct atm_vcc *vcc = ATM_SD(sock);
588 588
589 switch (cmd) { 589 switch (cmd) {
590 case ATM_ADDPARTY: 590 case ATM_ADDPARTY:
591 if (!test_bit(ATM_VF_SESSION, &vcc->flags)) 591 if (!test_bit(ATM_VF_SESSION, &vcc->flags))
592 return -EINVAL; 592 return -EINVAL;
593 if (copy_from_user(&sa, (void __user *) arg, sizeof(sa))) 593 if (copy_from_user(&sa, (void __user *) arg, sizeof(sa)))
594 return -EFAULT; 594 return -EFAULT;
595 error = svc_addparty(sock, (struct sockaddr *) &sa, sizeof(sa), 0); 595 error = svc_addparty(sock, (struct sockaddr *)&sa, sizeof(sa),
596 break; 596 0);
597 case ATM_DROPPARTY: 597 break;
598 if (!test_bit(ATM_VF_SESSION, &vcc->flags)) 598 case ATM_DROPPARTY:
599 return -EINVAL; 599 if (!test_bit(ATM_VF_SESSION, &vcc->flags))
600 if (copy_from_user(&ep_ref, (void __user *) arg, sizeof(int))) 600 return -EINVAL;
601 return -EFAULT; 601 if (copy_from_user(&ep_ref, (void __user *) arg, sizeof(int)))
602 error = svc_dropparty(sock, ep_ref); 602 return -EFAULT;
603 break; 603 error = svc_dropparty(sock, ep_ref);
604 default: 604 break;
605 error = vcc_ioctl(sock, cmd, arg); 605 default:
606 error = vcc_ioctl(sock, cmd, arg);
606 } 607 }
607 608
608 return error; 609 return error;
609} 610}
610 611
611#ifdef CONFIG_COMPAT 612#ifdef CONFIG_COMPAT
612static int svc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 613static int svc_compat_ioctl(struct socket *sock, unsigned int cmd,
614 unsigned long arg)
613{ 615{
614 /* The definition of ATM_ADDPARTY uses the size of struct atm_iobuf. 616 /* The definition of ATM_ADDPARTY uses the size of struct atm_iobuf.
615 But actually it takes a struct sockaddr_atmsvc, which doesn't need 617 But actually it takes a struct sockaddr_atmsvc, which doesn't need
@@ -660,13 +662,13 @@ static int svc_create(struct net *net, struct socket *sock, int protocol,
660 662
661 sock->ops = &svc_proto_ops; 663 sock->ops = &svc_proto_ops;
662 error = vcc_create(net, sock, protocol, AF_ATMSVC); 664 error = vcc_create(net, sock, protocol, AF_ATMSVC);
663 if (error) return error; 665 if (error)
666 return error;
664 ATM_SD(sock)->local.sas_family = AF_ATMSVC; 667 ATM_SD(sock)->local.sas_family = AF_ATMSVC;
665 ATM_SD(sock)->remote.sas_family = AF_ATMSVC; 668 ATM_SD(sock)->remote.sas_family = AF_ATMSVC;
666 return 0; 669 return 0;
667} 670}
668 671
669
670static const struct net_proto_family svc_family_ops = { 672static const struct net_proto_family svc_family_ops = {
671 .family = PF_ATMSVC, 673 .family = PF_ATMSVC,
672 .create = svc_create, 674 .create = svc_create,
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 5588ba69c46..a5beedf43e2 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1863,25 +1863,13 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1863static void *ax25_info_start(struct seq_file *seq, loff_t *pos) 1863static void *ax25_info_start(struct seq_file *seq, loff_t *pos)
1864 __acquires(ax25_list_lock) 1864 __acquires(ax25_list_lock)
1865{ 1865{
1866 struct ax25_cb *ax25;
1867 struct hlist_node *node;
1868 int i = 0;
1869
1870 spin_lock_bh(&ax25_list_lock); 1866 spin_lock_bh(&ax25_list_lock);
1871 ax25_for_each(ax25, node, &ax25_list) { 1867 return seq_hlist_start(&ax25_list, *pos);
1872 if (i == *pos)
1873 return ax25;
1874 ++i;
1875 }
1876 return NULL;
1877} 1868}
1878 1869
1879static void *ax25_info_next(struct seq_file *seq, void *v, loff_t *pos) 1870static void *ax25_info_next(struct seq_file *seq, void *v, loff_t *pos)
1880{ 1871{
1881 ++*pos; 1872 return seq_hlist_next(v, &ax25_list, pos);
1882
1883 return hlist_entry( ((struct ax25_cb *)v)->ax25_node.next,
1884 struct ax25_cb, ax25_node);
1885} 1873}
1886 1874
1887static void ax25_info_stop(struct seq_file *seq, void *v) 1875static void ax25_info_stop(struct seq_file *seq, void *v)
@@ -1892,7 +1880,7 @@ static void ax25_info_stop(struct seq_file *seq, void *v)
1892 1880
1893static int ax25_info_show(struct seq_file *seq, void *v) 1881static int ax25_info_show(struct seq_file *seq, void *v)
1894{ 1882{
1895 ax25_cb *ax25 = v; 1883 ax25_cb *ax25 = hlist_entry(v, struct ax25_cb, ax25_node);
1896 char buf[11]; 1884 char buf[11];
1897 int k; 1885 int k;
1898 1886
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index 832bcf092a0..9f13f6eefcb 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -146,31 +146,13 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
146static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos) 146static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
147 __acquires(ax25_uid_lock) 147 __acquires(ax25_uid_lock)
148{ 148{
149 struct ax25_uid_assoc *pt;
150 struct hlist_node *node;
151 int i = 1;
152
153 read_lock(&ax25_uid_lock); 149 read_lock(&ax25_uid_lock);
154 150 return seq_hlist_start_head(&ax25_uid_list, *pos);
155 if (*pos == 0)
156 return SEQ_START_TOKEN;
157
158 ax25_uid_for_each(pt, node, &ax25_uid_list) {
159 if (i == *pos)
160 return pt;
161 ++i;
162 }
163 return NULL;
164} 151}
165 152
166static void *ax25_uid_seq_next(struct seq_file *seq, void *v, loff_t *pos) 153static void *ax25_uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
167{ 154{
168 ++*pos; 155 return seq_hlist_next(v, &ax25_uid_list, pos);
169 if (v == SEQ_START_TOKEN)
170 return ax25_uid_list.first;
171 else
172 return hlist_entry(((ax25_uid_assoc *)v)->uid_node.next,
173 ax25_uid_assoc, uid_node);
174} 156}
175 157
176static void ax25_uid_seq_stop(struct seq_file *seq, void *v) 158static void ax25_uid_seq_stop(struct seq_file *seq, void *v)
@@ -186,8 +168,9 @@ static int ax25_uid_seq_show(struct seq_file *seq, void *v)
186 if (v == SEQ_START_TOKEN) 168 if (v == SEQ_START_TOKEN)
187 seq_printf(seq, "Policy: %d\n", ax25_uid_policy); 169 seq_printf(seq, "Policy: %d\n", ax25_uid_policy);
188 else { 170 else {
189 struct ax25_uid_assoc *pt = v; 171 struct ax25_uid_assoc *pt;
190 172
173 pt = hlist_entry(v, struct ax25_uid_assoc, uid_node);
191 seq_printf(seq, "%6d %s\n", pt->uid, ax2asc(buf, &pt->call)); 174 seq_printf(seq, "%6d %s\n", pt->uid, ax2asc(buf, &pt->call));
192 } 175 }
193 return 0; 176 return 0;
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 26fb831ef7e..b6234b73c4c 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -64,7 +64,7 @@ static void bnep_net_set_mc_list(struct net_device *dev)
64 struct sk_buff *skb; 64 struct sk_buff *skb;
65 int size; 65 int size;
66 66
67 BT_DBG("%s mc_count %d", dev->name, dev->mc_count); 67 BT_DBG("%s mc_count %d", dev->name, netdev_mc_count(dev));
68 68
69 size = sizeof(*r) + (BNEP_MAX_MULTICAST_FILTERS + 1) * ETH_ALEN * 2; 69 size = sizeof(*r) + (BNEP_MAX_MULTICAST_FILTERS + 1) * ETH_ALEN * 2;
70 skb = alloc_skb(size, GFP_ATOMIC); 70 skb = alloc_skb(size, GFP_ATOMIC);
@@ -97,7 +97,9 @@ static void bnep_net_set_mc_list(struct net_device *dev)
97 97
98 /* FIXME: We should group addresses here. */ 98 /* FIXME: We should group addresses here. */
99 99
100 for (i = 0; i < dev->mc_count && i < BNEP_MAX_MULTICAST_FILTERS; i++) { 100 for (i = 0;
101 i < netdev_mc_count(dev) && i < BNEP_MAX_MULTICAST_FILTERS;
102 i++) {
101 memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); 103 memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN);
102 memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); 104 memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN);
103 dmi = dmi->next; 105 dmi = dmi->next;
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 97f8d68d574..3487cfe74ae 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -21,7 +21,8 @@
21*/ 21*/
22 22
23#include <linux/module.h> 23#include <linux/module.h>
24 24#include <linux/proc_fs.h>
25#include <linux/seq_file.h>
25#include <linux/types.h> 26#include <linux/types.h>
26#include <linux/errno.h> 27#include <linux/errno.h>
27#include <linux/kernel.h> 28#include <linux/kernel.h>
@@ -516,33 +517,37 @@ static char *cmtp_procinfo(struct capi_ctr *ctrl)
516 return "CAPI Message Transport Protocol"; 517 return "CAPI Message Transport Protocol";
517} 518}
518 519
519static int cmtp_ctr_read_proc(char *page, char **start, off_t off, int count, int *eof, struct capi_ctr *ctrl) 520static int cmtp_proc_show(struct seq_file *m, void *v)
520{ 521{
522 struct capi_ctr *ctrl = m->private;
521 struct cmtp_session *session = ctrl->driverdata; 523 struct cmtp_session *session = ctrl->driverdata;
522 struct cmtp_application *app; 524 struct cmtp_application *app;
523 struct list_head *p, *n; 525 struct list_head *p, *n;
524 int len = 0;
525 526
526 len += sprintf(page + len, "%s\n\n", cmtp_procinfo(ctrl)); 527 seq_printf(m, "%s\n\n", cmtp_procinfo(ctrl));
527 len += sprintf(page + len, "addr %s\n", session->name); 528 seq_printf(m, "addr %s\n", session->name);
528 len += sprintf(page + len, "ctrl %d\n", session->num); 529 seq_printf(m, "ctrl %d\n", session->num);
529 530
530 list_for_each_safe(p, n, &session->applications) { 531 list_for_each_safe(p, n, &session->applications) {
531 app = list_entry(p, struct cmtp_application, list); 532 app = list_entry(p, struct cmtp_application, list);
532 len += sprintf(page + len, "appl %d -> %d\n", app->appl, app->mapping); 533 seq_printf(m, "appl %d -> %d\n", app->appl, app->mapping);
533 } 534 }
534 535
535 if (off + count >= len) 536 return 0;
536 *eof = 1; 537}
537
538 if (len < off)
539 return 0;
540
541 *start = page + off;
542 538
543 return ((count < len - off) ? count : len - off); 539static int cmtp_proc_open(struct inode *inode, struct file *file)
540{
541 return single_open(file, cmtp_proc_show, PDE(inode)->data);
544} 542}
545 543
544static const struct file_operations cmtp_proc_fops = {
545 .owner = THIS_MODULE,
546 .open = cmtp_proc_open,
547 .read = seq_read,
548 .llseek = seq_lseek,
549 .release = single_release,
550};
546 551
547int cmtp_attach_device(struct cmtp_session *session) 552int cmtp_attach_device(struct cmtp_session *session)
548{ 553{
@@ -582,7 +587,7 @@ int cmtp_attach_device(struct cmtp_session *session)
582 session->ctrl.send_message = cmtp_send_message; 587 session->ctrl.send_message = cmtp_send_message;
583 588
584 session->ctrl.procinfo = cmtp_procinfo; 589 session->ctrl.procinfo = cmtp_procinfo;
585 session->ctrl.ctr_read_proc = cmtp_ctr_read_proc; 590 session->ctrl.proc_fops = &cmtp_proc_fops;
586 591
587 if (attach_capi_ctr(&session->ctrl) < 0) { 592 if (attach_capi_ctr(&session->ctrl) < 0) {
588 BT_ERR("Can't attach new controller"); 593 BT_ERR("Can't attach new controller");
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index a2cbe61f6e6..bc2b1badab8 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -206,8 +206,6 @@ static struct net_device *new_bridge_dev(struct net *net, const char *name)
206 206
207 br_netfilter_rtable_init(br); 207 br_netfilter_rtable_init(br);
208 208
209 INIT_LIST_HEAD(&br->age_list);
210
211 br_stp_timer_init(br); 209 br_stp_timer_init(br);
212 210
213 return dev; 211 return dev;
@@ -467,7 +465,7 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
467 return 0; 465 return 0;
468} 466}
469 467
470void br_net_exit(struct net *net) 468void __net_exit br_net_exit(struct net *net)
471{ 469{
472 struct net_device *dev; 470 struct net_device *dev;
473 LIST_HEAD(list); 471 LIST_HEAD(list);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2114e45682e..1f0c4f44b76 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -93,7 +93,6 @@ struct net_bridge
93 struct net_device *dev; 93 struct net_device *dev;
94 spinlock_t hash_lock; 94 spinlock_t hash_lock;
95 struct hlist_head hash[BR_HASH_SIZE]; 95 struct hlist_head hash[BR_HASH_SIZE];
96 struct list_head age_list;
97 unsigned long feature_mask; 96 unsigned long feature_mask;
98#ifdef CONFIG_BRIDGE_NETFILTER 97#ifdef CONFIG_BRIDGE_NETFILTER
99 struct rtable fake_rtable; 98 struct rtable fake_rtable;
diff --git a/net/bridge/netfilter/ebt_802_3.c b/net/bridge/netfilter/ebt_802_3.c
index bd91dc58d49..5d1176758ca 100644
--- a/net/bridge/netfilter/ebt_802_3.c
+++ b/net/bridge/netfilter/ebt_802_3.c
@@ -52,7 +52,7 @@ static struct xt_match ebt_802_3_mt_reg __read_mostly = {
52 .family = NFPROTO_BRIDGE, 52 .family = NFPROTO_BRIDGE,
53 .match = ebt_802_3_mt, 53 .match = ebt_802_3_mt,
54 .checkentry = ebt_802_3_mt_check, 54 .checkentry = ebt_802_3_mt_check,
55 .matchsize = XT_ALIGN(sizeof(struct ebt_802_3_info)), 55 .matchsize = sizeof(struct ebt_802_3_info),
56 .me = THIS_MODULE, 56 .me = THIS_MODULE,
57}; 57};
58 58
diff --git a/net/bridge/netfilter/ebt_arp.c b/net/bridge/netfilter/ebt_arp.c
index b7ad60419f9..e727697c584 100644
--- a/net/bridge/netfilter/ebt_arp.c
+++ b/net/bridge/netfilter/ebt_arp.c
@@ -120,7 +120,7 @@ static struct xt_match ebt_arp_mt_reg __read_mostly = {
120 .family = NFPROTO_BRIDGE, 120 .family = NFPROTO_BRIDGE,
121 .match = ebt_arp_mt, 121 .match = ebt_arp_mt,
122 .checkentry = ebt_arp_mt_check, 122 .checkentry = ebt_arp_mt_check,
123 .matchsize = XT_ALIGN(sizeof(struct ebt_arp_info)), 123 .matchsize = sizeof(struct ebt_arp_info),
124 .me = THIS_MODULE, 124 .me = THIS_MODULE,
125}; 125};
126 126
diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c
index 76584cd72e5..f392e9d93f5 100644
--- a/net/bridge/netfilter/ebt_arpreply.c
+++ b/net/bridge/netfilter/ebt_arpreply.c
@@ -78,7 +78,7 @@ static struct xt_target ebt_arpreply_tg_reg __read_mostly = {
78 .hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_PRE_ROUTING), 78 .hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_PRE_ROUTING),
79 .target = ebt_arpreply_tg, 79 .target = ebt_arpreply_tg,
80 .checkentry = ebt_arpreply_tg_check, 80 .checkentry = ebt_arpreply_tg_check,
81 .targetsize = XT_ALIGN(sizeof(struct ebt_arpreply_info)), 81 .targetsize = sizeof(struct ebt_arpreply_info),
82 .me = THIS_MODULE, 82 .me = THIS_MODULE,
83}; 83};
84 84
diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
index 6b49ea9e31f..2bb40d728a3 100644
--- a/net/bridge/netfilter/ebt_dnat.c
+++ b/net/bridge/netfilter/ebt_dnat.c
@@ -54,7 +54,7 @@ static struct xt_target ebt_dnat_tg_reg __read_mostly = {
54 (1 << NF_BR_LOCAL_OUT) | (1 << NF_BR_BROUTING), 54 (1 << NF_BR_LOCAL_OUT) | (1 << NF_BR_BROUTING),
55 .target = ebt_dnat_tg, 55 .target = ebt_dnat_tg,
56 .checkentry = ebt_dnat_tg_check, 56 .checkentry = ebt_dnat_tg_check,
57 .targetsize = XT_ALIGN(sizeof(struct ebt_nat_info)), 57 .targetsize = sizeof(struct ebt_nat_info),
58 .me = THIS_MODULE, 58 .me = THIS_MODULE,
59}; 59};
60 60
diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c
index d771bbfbcbe..5de6df6f86b 100644
--- a/net/bridge/netfilter/ebt_ip.c
+++ b/net/bridge/netfilter/ebt_ip.c
@@ -110,7 +110,7 @@ static struct xt_match ebt_ip_mt_reg __read_mostly = {
110 .family = NFPROTO_BRIDGE, 110 .family = NFPROTO_BRIDGE,
111 .match = ebt_ip_mt, 111 .match = ebt_ip_mt,
112 .checkentry = ebt_ip_mt_check, 112 .checkentry = ebt_ip_mt_check,
113 .matchsize = XT_ALIGN(sizeof(struct ebt_ip_info)), 113 .matchsize = sizeof(struct ebt_ip_info),
114 .me = THIS_MODULE, 114 .me = THIS_MODULE,
115}; 115};
116 116
diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c
index 784a6573876..bbf2534ef02 100644
--- a/net/bridge/netfilter/ebt_ip6.c
+++ b/net/bridge/netfilter/ebt_ip6.c
@@ -122,7 +122,7 @@ static struct xt_match ebt_ip6_mt_reg __read_mostly = {
122 .family = NFPROTO_BRIDGE, 122 .family = NFPROTO_BRIDGE,
123 .match = ebt_ip6_mt, 123 .match = ebt_ip6_mt,
124 .checkentry = ebt_ip6_mt_check, 124 .checkentry = ebt_ip6_mt_check,
125 .matchsize = XT_ALIGN(sizeof(struct ebt_ip6_info)), 125 .matchsize = sizeof(struct ebt_ip6_info),
126 .me = THIS_MODULE, 126 .me = THIS_MODULE,
127}; 127};
128 128
diff --git a/net/bridge/netfilter/ebt_limit.c b/net/bridge/netfilter/ebt_limit.c
index f7bd9192ff0..7a8182710eb 100644
--- a/net/bridge/netfilter/ebt_limit.c
+++ b/net/bridge/netfilter/ebt_limit.c
@@ -84,13 +84,29 @@ static bool ebt_limit_mt_check(const struct xt_mtchk_param *par)
84 return true; 84 return true;
85} 85}
86 86
87
88#ifdef CONFIG_COMPAT
89/*
90 * no conversion function needed --
91 * only avg/burst have meaningful values in userspace.
92 */
93struct ebt_compat_limit_info {
94 compat_uint_t avg, burst;
95 compat_ulong_t prev;
96 compat_uint_t credit, credit_cap, cost;
97};
98#endif
99
87static struct xt_match ebt_limit_mt_reg __read_mostly = { 100static struct xt_match ebt_limit_mt_reg __read_mostly = {
88 .name = "limit", 101 .name = "limit",
89 .revision = 0, 102 .revision = 0,
90 .family = NFPROTO_BRIDGE, 103 .family = NFPROTO_BRIDGE,
91 .match = ebt_limit_mt, 104 .match = ebt_limit_mt,
92 .checkentry = ebt_limit_mt_check, 105 .checkentry = ebt_limit_mt_check,
93 .matchsize = XT_ALIGN(sizeof(struct ebt_limit_info)), 106 .matchsize = sizeof(struct ebt_limit_info),
107#ifdef CONFIG_COMPAT
108 .compatsize = sizeof(struct ebt_compat_limit_info),
109#endif
94 .me = THIS_MODULE, 110 .me = THIS_MODULE,
95}; 111};
96 112
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index e4ea3fdd1d4..e873924ddb5 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -195,7 +195,7 @@ static struct xt_target ebt_log_tg_reg __read_mostly = {
195 .family = NFPROTO_BRIDGE, 195 .family = NFPROTO_BRIDGE,
196 .target = ebt_log_tg, 196 .target = ebt_log_tg,
197 .checkentry = ebt_log_tg_check, 197 .checkentry = ebt_log_tg_check,
198 .targetsize = XT_ALIGN(sizeof(struct ebt_log_info)), 198 .targetsize = sizeof(struct ebt_log_info),
199 .me = THIS_MODULE, 199 .me = THIS_MODULE,
200}; 200};
201 201
diff --git a/net/bridge/netfilter/ebt_mark.c b/net/bridge/netfilter/ebt_mark.c
index 2fee7e8e2e9..2b5ce533d6b 100644
--- a/net/bridge/netfilter/ebt_mark.c
+++ b/net/bridge/netfilter/ebt_mark.c
@@ -52,6 +52,32 @@ static bool ebt_mark_tg_check(const struct xt_tgchk_param *par)
52 return false; 52 return false;
53 return true; 53 return true;
54} 54}
55#ifdef CONFIG_COMPAT
56struct compat_ebt_mark_t_info {
57 compat_ulong_t mark;
58 compat_uint_t target;
59};
60
61static void mark_tg_compat_from_user(void *dst, const void *src)
62{
63 const struct compat_ebt_mark_t_info *user = src;
64 struct ebt_mark_t_info *kern = dst;
65
66 kern->mark = user->mark;
67 kern->target = user->target;
68}
69
70static int mark_tg_compat_to_user(void __user *dst, const void *src)
71{
72 struct compat_ebt_mark_t_info __user *user = dst;
73 const struct ebt_mark_t_info *kern = src;
74
75 if (put_user(kern->mark, &user->mark) ||
76 put_user(kern->target, &user->target))
77 return -EFAULT;
78 return 0;
79}
80#endif
55 81
56static struct xt_target ebt_mark_tg_reg __read_mostly = { 82static struct xt_target ebt_mark_tg_reg __read_mostly = {
57 .name = "mark", 83 .name = "mark",
@@ -59,7 +85,12 @@ static struct xt_target ebt_mark_tg_reg __read_mostly = {
59 .family = NFPROTO_BRIDGE, 85 .family = NFPROTO_BRIDGE,
60 .target = ebt_mark_tg, 86 .target = ebt_mark_tg,
61 .checkentry = ebt_mark_tg_check, 87 .checkentry = ebt_mark_tg_check,
62 .targetsize = XT_ALIGN(sizeof(struct ebt_mark_t_info)), 88 .targetsize = sizeof(struct ebt_mark_t_info),
89#ifdef CONFIG_COMPAT
90 .compatsize = sizeof(struct compat_ebt_mark_t_info),
91 .compat_from_user = mark_tg_compat_from_user,
92 .compat_to_user = mark_tg_compat_to_user,
93#endif
63 .me = THIS_MODULE, 94 .me = THIS_MODULE,
64}; 95};
65 96
diff --git a/net/bridge/netfilter/ebt_mark_m.c b/net/bridge/netfilter/ebt_mark_m.c
index ea570f214b1..8de8c396d91 100644
--- a/net/bridge/netfilter/ebt_mark_m.c
+++ b/net/bridge/netfilter/ebt_mark_m.c
@@ -35,13 +35,50 @@ static bool ebt_mark_mt_check(const struct xt_mtchk_param *par)
35 return true; 35 return true;
36} 36}
37 37
38
39#ifdef CONFIG_COMPAT
40struct compat_ebt_mark_m_info {
41 compat_ulong_t mark, mask;
42 uint8_t invert, bitmask;
43};
44
45static void mark_mt_compat_from_user(void *dst, const void *src)
46{
47 const struct compat_ebt_mark_m_info *user = src;
48 struct ebt_mark_m_info *kern = dst;
49
50 kern->mark = user->mark;
51 kern->mask = user->mask;
52 kern->invert = user->invert;
53 kern->bitmask = user->bitmask;
54}
55
56static int mark_mt_compat_to_user(void __user *dst, const void *src)
57{
58 struct compat_ebt_mark_m_info __user *user = dst;
59 const struct ebt_mark_m_info *kern = src;
60
61 if (put_user(kern->mark, &user->mark) ||
62 put_user(kern->mask, &user->mask) ||
63 put_user(kern->invert, &user->invert) ||
64 put_user(kern->bitmask, &user->bitmask))
65 return -EFAULT;
66 return 0;
67}
68#endif
69
38static struct xt_match ebt_mark_mt_reg __read_mostly = { 70static struct xt_match ebt_mark_mt_reg __read_mostly = {
39 .name = "mark_m", 71 .name = "mark_m",
40 .revision = 0, 72 .revision = 0,
41 .family = NFPROTO_BRIDGE, 73 .family = NFPROTO_BRIDGE,
42 .match = ebt_mark_mt, 74 .match = ebt_mark_mt,
43 .checkentry = ebt_mark_mt_check, 75 .checkentry = ebt_mark_mt_check,
44 .matchsize = XT_ALIGN(sizeof(struct ebt_mark_m_info)), 76 .matchsize = sizeof(struct ebt_mark_m_info),
77#ifdef CONFIG_COMPAT
78 .compatsize = sizeof(struct compat_ebt_mark_m_info),
79 .compat_from_user = mark_mt_compat_from_user,
80 .compat_to_user = mark_mt_compat_to_user,
81#endif
45 .me = THIS_MODULE, 82 .me = THIS_MODULE,
46}; 83};
47 84
diff --git a/net/bridge/netfilter/ebt_nflog.c b/net/bridge/netfilter/ebt_nflog.c
index 2a63d996dd4..40dbd248b9a 100644
--- a/net/bridge/netfilter/ebt_nflog.c
+++ b/net/bridge/netfilter/ebt_nflog.c
@@ -51,7 +51,7 @@ static struct xt_target ebt_nflog_tg_reg __read_mostly = {
51 .family = NFPROTO_BRIDGE, 51 .family = NFPROTO_BRIDGE,
52 .target = ebt_nflog_tg, 52 .target = ebt_nflog_tg,
53 .checkentry = ebt_nflog_tg_check, 53 .checkentry = ebt_nflog_tg_check,
54 .targetsize = XT_ALIGN(sizeof(struct ebt_nflog_info)), 54 .targetsize = sizeof(struct ebt_nflog_info),
55 .me = THIS_MODULE, 55 .me = THIS_MODULE,
56}; 56};
57 57
diff --git a/net/bridge/netfilter/ebt_pkttype.c b/net/bridge/netfilter/ebt_pkttype.c
index 883e96e2a54..e2a07e6cbef 100644
--- a/net/bridge/netfilter/ebt_pkttype.c
+++ b/net/bridge/netfilter/ebt_pkttype.c
@@ -36,7 +36,7 @@ static struct xt_match ebt_pkttype_mt_reg __read_mostly = {
36 .family = NFPROTO_BRIDGE, 36 .family = NFPROTO_BRIDGE,
37 .match = ebt_pkttype_mt, 37 .match = ebt_pkttype_mt,
38 .checkentry = ebt_pkttype_mt_check, 38 .checkentry = ebt_pkttype_mt_check,
39 .matchsize = XT_ALIGN(sizeof(struct ebt_pkttype_info)), 39 .matchsize = sizeof(struct ebt_pkttype_info),
40 .me = THIS_MODULE, 40 .me = THIS_MODULE,
41}; 41};
42 42
diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
index c8a49f7a57b..9be8fbcd370 100644
--- a/net/bridge/netfilter/ebt_redirect.c
+++ b/net/bridge/netfilter/ebt_redirect.c
@@ -59,7 +59,7 @@ static struct xt_target ebt_redirect_tg_reg __read_mostly = {
59 (1 << NF_BR_BROUTING), 59 (1 << NF_BR_BROUTING),
60 .target = ebt_redirect_tg, 60 .target = ebt_redirect_tg,
61 .checkentry = ebt_redirect_tg_check, 61 .checkentry = ebt_redirect_tg_check,
62 .targetsize = XT_ALIGN(sizeof(struct ebt_redirect_info)), 62 .targetsize = sizeof(struct ebt_redirect_info),
63 .me = THIS_MODULE, 63 .me = THIS_MODULE,
64}; 64};
65 65
diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c
index 8d04d4c302b..9c7b520765a 100644
--- a/net/bridge/netfilter/ebt_snat.c
+++ b/net/bridge/netfilter/ebt_snat.c
@@ -67,7 +67,7 @@ static struct xt_target ebt_snat_tg_reg __read_mostly = {
67 .hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_POST_ROUTING), 67 .hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_POST_ROUTING),
68 .target = ebt_snat_tg, 68 .target = ebt_snat_tg,
69 .checkentry = ebt_snat_tg_check, 69 .checkentry = ebt_snat_tg_check,
70 .targetsize = XT_ALIGN(sizeof(struct ebt_nat_info)), 70 .targetsize = sizeof(struct ebt_nat_info),
71 .me = THIS_MODULE, 71 .me = THIS_MODULE,
72}; 72};
73 73
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index 75e29a9cebd..92a93d36376 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -177,7 +177,7 @@ static struct xt_match ebt_stp_mt_reg __read_mostly = {
177 .family = NFPROTO_BRIDGE, 177 .family = NFPROTO_BRIDGE,
178 .match = ebt_stp_mt, 178 .match = ebt_stp_mt,
179 .checkentry = ebt_stp_mt_check, 179 .checkentry = ebt_stp_mt_check,
180 .matchsize = XT_ALIGN(sizeof(struct ebt_stp_info)), 180 .matchsize = sizeof(struct ebt_stp_info),
181 .me = THIS_MODULE, 181 .me = THIS_MODULE,
182}; 182};
183 183
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index ce50688a643..c6ac657074a 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -275,7 +275,7 @@ static struct xt_target ebt_ulog_tg_reg __read_mostly = {
275 .family = NFPROTO_BRIDGE, 275 .family = NFPROTO_BRIDGE,
276 .target = ebt_ulog_tg, 276 .target = ebt_ulog_tg,
277 .checkentry = ebt_ulog_tg_check, 277 .checkentry = ebt_ulog_tg_check,
278 .targetsize = XT_ALIGN(sizeof(struct ebt_ulog_info)), 278 .targetsize = sizeof(struct ebt_ulog_info),
279 .me = THIS_MODULE, 279 .me = THIS_MODULE,
280}; 280};
281 281
diff --git a/net/bridge/netfilter/ebt_vlan.c b/net/bridge/netfilter/ebt_vlan.c
index 3dddd489328..be1dd2e1f61 100644
--- a/net/bridge/netfilter/ebt_vlan.c
+++ b/net/bridge/netfilter/ebt_vlan.c
@@ -163,7 +163,7 @@ static struct xt_match ebt_vlan_mt_reg __read_mostly = {
163 .family = NFPROTO_BRIDGE, 163 .family = NFPROTO_BRIDGE,
164 .match = ebt_vlan_mt, 164 .match = ebt_vlan_mt,
165 .checkentry = ebt_vlan_mt_check, 165 .checkentry = ebt_vlan_mt_check,
166 .matchsize = XT_ALIGN(sizeof(struct ebt_vlan_info)), 166 .matchsize = sizeof(struct ebt_vlan_info),
167 .me = THIS_MODULE, 167 .me = THIS_MODULE,
168}; 168};
169 169
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index d32ab13e728..ae3f106c390 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -71,7 +71,7 @@ static int __net_init broute_net_init(struct net *net)
71 71
72static void __net_exit broute_net_exit(struct net *net) 72static void __net_exit broute_net_exit(struct net *net)
73{ 73{
74 ebt_unregister_table(net->xt.broute_table); 74 ebt_unregister_table(net, net->xt.broute_table);
75} 75}
76 76
77static struct pernet_operations broute_net_ops = { 77static struct pernet_operations broute_net_ops = {
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index 60b1a6ca718..42e6bd09457 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -107,7 +107,7 @@ static int __net_init frame_filter_net_init(struct net *net)
107 107
108static void __net_exit frame_filter_net_exit(struct net *net) 108static void __net_exit frame_filter_net_exit(struct net *net)
109{ 109{
110 ebt_unregister_table(net->xt.frame_filter); 110 ebt_unregister_table(net, net->xt.frame_filter);
111} 111}
112 112
113static struct pernet_operations frame_filter_net_ops = { 113static struct pernet_operations frame_filter_net_ops = {
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 4a98804203b..6dc2f878ae0 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -107,7 +107,7 @@ static int __net_init frame_nat_net_init(struct net *net)
107 107
108static void __net_exit frame_nat_net_exit(struct net *net) 108static void __net_exit frame_nat_net_exit(struct net *net)
109{ 109{
110 ebt_unregister_table(net->xt.frame_nat); 110 ebt_unregister_table(net, net->xt.frame_nat);
111} 111}
112 112
113static struct pernet_operations frame_nat_net_ops = { 113static struct pernet_operations frame_nat_net_ops = {
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 0b7f262cd14..dfb58056a89 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -33,11 +33,6 @@
33#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\ 33#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
34 "report to author: "format, ## args) 34 "report to author: "format, ## args)
35/* #define BUGPRINT(format, args...) */ 35/* #define BUGPRINT(format, args...) */
36#define MEMPRINT(format, args...) printk("kernel msg: ebtables "\
37 ": out of memory: "format, ## args)
38/* #define MEMPRINT(format, args...) */
39
40
41 36
42/* 37/*
43 * Each cpu has its own set of counters, so there is no need for write_lock in 38 * Each cpu has its own set of counters, so there is no need for write_lock in
@@ -56,11 +51,37 @@
56 51
57static DEFINE_MUTEX(ebt_mutex); 52static DEFINE_MUTEX(ebt_mutex);
58 53
54#ifdef CONFIG_COMPAT
55static void ebt_standard_compat_from_user(void *dst, const void *src)
56{
57 int v = *(compat_int_t *)src;
58
59 if (v >= 0)
60 v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
61 memcpy(dst, &v, sizeof(v));
62}
63
64static int ebt_standard_compat_to_user(void __user *dst, const void *src)
65{
66 compat_int_t cv = *(int *)src;
67
68 if (cv >= 0)
69 cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
70 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
71}
72#endif
73
74
59static struct xt_target ebt_standard_target = { 75static struct xt_target ebt_standard_target = {
60 .name = "standard", 76 .name = "standard",
61 .revision = 0, 77 .revision = 0,
62 .family = NFPROTO_BRIDGE, 78 .family = NFPROTO_BRIDGE,
63 .targetsize = sizeof(int), 79 .targetsize = sizeof(int),
80#ifdef CONFIG_COMPAT
81 .compatsize = sizeof(compat_int_t),
82 .compat_from_user = ebt_standard_compat_from_user,
83 .compat_to_user = ebt_standard_compat_to_user,
84#endif
64}; 85};
65 86
66static inline int 87static inline int
@@ -82,7 +103,8 @@ static inline int ebt_do_match (struct ebt_entry_match *m,
82 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH; 103 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
83} 104}
84 105
85static inline int ebt_dev_check(char *entry, const struct net_device *device) 106static inline int
107ebt_dev_check(const char *entry, const struct net_device *device)
86{ 108{
87 int i = 0; 109 int i = 0;
88 const char *devname; 110 const char *devname;
@@ -100,8 +122,9 @@ static inline int ebt_dev_check(char *entry, const struct net_device *device)
100 122
101#define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg)) 123#define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
102/* process standard matches */ 124/* process standard matches */
103static inline int ebt_basic_match(struct ebt_entry *e, struct ethhdr *h, 125static inline int
104 const struct net_device *in, const struct net_device *out) 126ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h,
127 const struct net_device *in, const struct net_device *out)
105{ 128{
106 int verdict, i; 129 int verdict, i;
107 130
@@ -156,12 +179,12 @@ unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
156 int i, nentries; 179 int i, nentries;
157 struct ebt_entry *point; 180 struct ebt_entry *point;
158 struct ebt_counter *counter_base, *cb_base; 181 struct ebt_counter *counter_base, *cb_base;
159 struct ebt_entry_target *t; 182 const struct ebt_entry_target *t;
160 int verdict, sp = 0; 183 int verdict, sp = 0;
161 struct ebt_chainstack *cs; 184 struct ebt_chainstack *cs;
162 struct ebt_entries *chaininfo; 185 struct ebt_entries *chaininfo;
163 char *base; 186 const char *base;
164 struct ebt_table_info *private; 187 const struct ebt_table_info *private;
165 bool hotdrop = false; 188 bool hotdrop = false;
166 struct xt_match_param mtpar; 189 struct xt_match_param mtpar;
167 struct xt_target_param tgpar; 190 struct xt_target_param tgpar;
@@ -395,7 +418,7 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
395 return 0; 418 return 0;
396} 419}
397 420
398static int ebt_verify_pointers(struct ebt_replace *repl, 421static int ebt_verify_pointers(const struct ebt_replace *repl,
399 struct ebt_table_info *newinfo) 422 struct ebt_table_info *newinfo)
400{ 423{
401 unsigned int limit = repl->entries_size; 424 unsigned int limit = repl->entries_size;
@@ -442,6 +465,8 @@ static int ebt_verify_pointers(struct ebt_replace *repl,
442 break; 465 break;
443 if (left < e->next_offset) 466 if (left < e->next_offset)
444 break; 467 break;
468 if (e->next_offset < sizeof(struct ebt_entry))
469 return -EINVAL;
445 offset += e->next_offset; 470 offset += e->next_offset;
446 } 471 }
447 } 472 }
@@ -466,8 +491,8 @@ static int ebt_verify_pointers(struct ebt_replace *repl,
466 * to parse the userspace data 491 * to parse the userspace data
467 */ 492 */
468static inline int 493static inline int
469ebt_check_entry_size_and_hooks(struct ebt_entry *e, 494ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
470 struct ebt_table_info *newinfo, 495 const struct ebt_table_info *newinfo,
471 unsigned int *n, unsigned int *cnt, 496 unsigned int *n, unsigned int *cnt,
472 unsigned int *totalcnt, unsigned int *udc_cnt) 497 unsigned int *totalcnt, unsigned int *udc_cnt)
473{ 498{
@@ -561,13 +586,14 @@ ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
561} 586}
562 587
563static inline int 588static inline int
564ebt_cleanup_match(struct ebt_entry_match *m, unsigned int *i) 589ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
565{ 590{
566 struct xt_mtdtor_param par; 591 struct xt_mtdtor_param par;
567 592
568 if (i && (*i)-- == 0) 593 if (i && (*i)-- == 0)
569 return 1; 594 return 1;
570 595
596 par.net = net;
571 par.match = m->u.match; 597 par.match = m->u.match;
572 par.matchinfo = m->data; 598 par.matchinfo = m->data;
573 par.family = NFPROTO_BRIDGE; 599 par.family = NFPROTO_BRIDGE;
@@ -578,13 +604,14 @@ ebt_cleanup_match(struct ebt_entry_match *m, unsigned int *i)
578} 604}
579 605
580static inline int 606static inline int
581ebt_cleanup_watcher(struct ebt_entry_watcher *w, unsigned int *i) 607ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
582{ 608{
583 struct xt_tgdtor_param par; 609 struct xt_tgdtor_param par;
584 610
585 if (i && (*i)-- == 0) 611 if (i && (*i)-- == 0)
586 return 1; 612 return 1;
587 613
614 par.net = net;
588 par.target = w->u.watcher; 615 par.target = w->u.watcher;
589 par.targinfo = w->data; 616 par.targinfo = w->data;
590 par.family = NFPROTO_BRIDGE; 617 par.family = NFPROTO_BRIDGE;
@@ -595,7 +622,7 @@ ebt_cleanup_watcher(struct ebt_entry_watcher *w, unsigned int *i)
595} 622}
596 623
597static inline int 624static inline int
598ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt) 625ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
599{ 626{
600 struct xt_tgdtor_param par; 627 struct xt_tgdtor_param par;
601 struct ebt_entry_target *t; 628 struct ebt_entry_target *t;
@@ -605,10 +632,11 @@ ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt)
605 /* we're done */ 632 /* we're done */
606 if (cnt && (*cnt)-- == 0) 633 if (cnt && (*cnt)-- == 0)
607 return 1; 634 return 1;
608 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, NULL); 635 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
609 EBT_MATCH_ITERATE(e, ebt_cleanup_match, NULL); 636 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
610 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); 637 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
611 638
639 par.net = net;
612 par.target = t->u.target; 640 par.target = t->u.target;
613 par.targinfo = t->data; 641 par.targinfo = t->data;
614 par.family = NFPROTO_BRIDGE; 642 par.family = NFPROTO_BRIDGE;
@@ -619,7 +647,8 @@ ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt)
619} 647}
620 648
621static inline int 649static inline int
622ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo, 650ebt_check_entry(struct ebt_entry *e, struct net *net,
651 const struct ebt_table_info *newinfo,
623 const char *name, unsigned int *cnt, 652 const char *name, unsigned int *cnt,
624 struct ebt_cl_stack *cl_s, unsigned int udc_cnt) 653 struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
625{ 654{
@@ -671,6 +700,7 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo,
671 } 700 }
672 i = 0; 701 i = 0;
673 702
703 mtpar.net = tgpar.net = net;
674 mtpar.table = tgpar.table = name; 704 mtpar.table = tgpar.table = name;
675 mtpar.entryinfo = tgpar.entryinfo = e; 705 mtpar.entryinfo = tgpar.entryinfo = e;
676 mtpar.hook_mask = tgpar.hook_mask = hookmask; 706 mtpar.hook_mask = tgpar.hook_mask = hookmask;
@@ -726,9 +756,9 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo,
726 (*cnt)++; 756 (*cnt)++;
727 return 0; 757 return 0;
728cleanup_watchers: 758cleanup_watchers:
729 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, &j); 759 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
730cleanup_matches: 760cleanup_matches:
731 EBT_MATCH_ITERATE(e, ebt_cleanup_match, &i); 761 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
732 return ret; 762 return ret;
733} 763}
734 764
@@ -737,12 +767,12 @@ cleanup_matches:
737 * the hook mask for udc tells us from which base chains the udc can be 767 * the hook mask for udc tells us from which base chains the udc can be
738 * accessed. This mask is a parameter to the check() functions of the extensions 768 * accessed. This mask is a parameter to the check() functions of the extensions
739 */ 769 */
740static int check_chainloops(struct ebt_entries *chain, struct ebt_cl_stack *cl_s, 770static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
741 unsigned int udc_cnt, unsigned int hooknr, char *base) 771 unsigned int udc_cnt, unsigned int hooknr, char *base)
742{ 772{
743 int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict; 773 int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
744 struct ebt_entry *e = (struct ebt_entry *)chain->data; 774 const struct ebt_entry *e = (struct ebt_entry *)chain->data;
745 struct ebt_entry_target *t; 775 const struct ebt_entry_target *t;
746 776
747 while (pos < nentries || chain_nr != -1) { 777 while (pos < nentries || chain_nr != -1) {
748 /* end of udc, go back one 'recursion' step */ 778 /* end of udc, go back one 'recursion' step */
@@ -808,7 +838,8 @@ letscontinue:
808} 838}
809 839
810/* do the parsing of the table/chains/entries/matches/watchers/targets, heh */ 840/* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
811static int translate_table(char *name, struct ebt_table_info *newinfo) 841static int translate_table(struct net *net, const char *name,
842 struct ebt_table_info *newinfo)
812{ 843{
813 unsigned int i, j, k, udc_cnt; 844 unsigned int i, j, k, udc_cnt;
814 int ret; 845 int ret;
@@ -917,17 +948,17 @@ static int translate_table(char *name, struct ebt_table_info *newinfo)
917 /* used to know what we need to clean up if something goes wrong */ 948 /* used to know what we need to clean up if something goes wrong */
918 i = 0; 949 i = 0;
919 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 950 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
920 ebt_check_entry, newinfo, name, &i, cl_s, udc_cnt); 951 ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
921 if (ret != 0) { 952 if (ret != 0) {
922 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 953 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
923 ebt_cleanup_entry, &i); 954 ebt_cleanup_entry, net, &i);
924 } 955 }
925 vfree(cl_s); 956 vfree(cl_s);
926 return ret; 957 return ret;
927} 958}
928 959
929/* called under write_lock */ 960/* called under write_lock */
930static void get_counters(struct ebt_counter *oldcounters, 961static void get_counters(const struct ebt_counter *oldcounters,
931 struct ebt_counter *counters, unsigned int nentries) 962 struct ebt_counter *counters, unsigned int nentries)
932{ 963{
933 int i, cpu; 964 int i, cpu;
@@ -949,90 +980,45 @@ static void get_counters(struct ebt_counter *oldcounters,
949 } 980 }
950} 981}
951 982
952/* replace the table */ 983static int do_replace_finish(struct net *net, struct ebt_replace *repl,
953static int do_replace(struct net *net, void __user *user, unsigned int len) 984 struct ebt_table_info *newinfo)
954{ 985{
955 int ret, i, countersize; 986 int ret, i;
956 struct ebt_table_info *newinfo;
957 struct ebt_replace tmp;
958 struct ebt_table *t;
959 struct ebt_counter *counterstmp = NULL; 987 struct ebt_counter *counterstmp = NULL;
960 /* used to be able to unlock earlier */ 988 /* used to be able to unlock earlier */
961 struct ebt_table_info *table; 989 struct ebt_table_info *table;
962 990 struct ebt_table *t;
963 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
964 return -EFAULT;
965
966 if (len != sizeof(tmp) + tmp.entries_size) {
967 BUGPRINT("Wrong len argument\n");
968 return -EINVAL;
969 }
970
971 if (tmp.entries_size == 0) {
972 BUGPRINT("Entries_size never zero\n");
973 return -EINVAL;
974 }
975 /* overflow check */
976 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS -
977 SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
978 return -ENOMEM;
979 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
980 return -ENOMEM;
981
982 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
983 newinfo = vmalloc(sizeof(*newinfo) + countersize);
984 if (!newinfo)
985 return -ENOMEM;
986
987 if (countersize)
988 memset(newinfo->counters, 0, countersize);
989
990 newinfo->entries = vmalloc(tmp.entries_size);
991 if (!newinfo->entries) {
992 ret = -ENOMEM;
993 goto free_newinfo;
994 }
995 if (copy_from_user(
996 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
997 BUGPRINT("Couldn't copy entries from userspace\n");
998 ret = -EFAULT;
999 goto free_entries;
1000 }
1001 991
1002 /* the user wants counters back 992 /* the user wants counters back
1003 the check on the size is done later, when we have the lock */ 993 the check on the size is done later, when we have the lock */
1004 if (tmp.num_counters) { 994 if (repl->num_counters) {
1005 counterstmp = vmalloc(tmp.num_counters * sizeof(*counterstmp)); 995 unsigned long size = repl->num_counters * sizeof(*counterstmp);
1006 if (!counterstmp) { 996 counterstmp = vmalloc(size);
1007 ret = -ENOMEM; 997 if (!counterstmp)
1008 goto free_entries; 998 return -ENOMEM;
1009 }
1010 } 999 }
1011 else
1012 counterstmp = NULL;
1013 1000
1014 /* this can get initialized by translate_table() */
1015 newinfo->chainstack = NULL; 1001 newinfo->chainstack = NULL;
1016 ret = ebt_verify_pointers(&tmp, newinfo); 1002 ret = ebt_verify_pointers(repl, newinfo);
1017 if (ret != 0) 1003 if (ret != 0)
1018 goto free_counterstmp; 1004 goto free_counterstmp;
1019 1005
1020 ret = translate_table(tmp.name, newinfo); 1006 ret = translate_table(net, repl->name, newinfo);
1021 1007
1022 if (ret != 0) 1008 if (ret != 0)
1023 goto free_counterstmp; 1009 goto free_counterstmp;
1024 1010
1025 t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); 1011 t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
1026 if (!t) { 1012 if (!t) {
1027 ret = -ENOENT; 1013 ret = -ENOENT;
1028 goto free_iterate; 1014 goto free_iterate;
1029 } 1015 }
1030 1016
1031 /* the table doesn't like it */ 1017 /* the table doesn't like it */
1032 if (t->check && (ret = t->check(newinfo, tmp.valid_hooks))) 1018 if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
1033 goto free_unlock; 1019 goto free_unlock;
1034 1020
1035 if (tmp.num_counters && tmp.num_counters != t->private->nentries) { 1021 if (repl->num_counters && repl->num_counters != t->private->nentries) {
1036 BUGPRINT("Wrong nr. of counters requested\n"); 1022 BUGPRINT("Wrong nr. of counters requested\n");
1037 ret = -EINVAL; 1023 ret = -EINVAL;
1038 goto free_unlock; 1024 goto free_unlock;
@@ -1048,7 +1034,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
1048 module_put(t->me); 1034 module_put(t->me);
1049 /* we need an atomic snapshot of the counters */ 1035 /* we need an atomic snapshot of the counters */
1050 write_lock_bh(&t->lock); 1036 write_lock_bh(&t->lock);
1051 if (tmp.num_counters) 1037 if (repl->num_counters)
1052 get_counters(t->private->counters, counterstmp, 1038 get_counters(t->private->counters, counterstmp,
1053 t->private->nentries); 1039 t->private->nentries);
1054 1040
@@ -1059,10 +1045,9 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
1059 allocation. Only reason why this is done is because this way the lock 1045 allocation. Only reason why this is done is because this way the lock
1060 is held only once, while this doesn't bring the kernel into a 1046 is held only once, while this doesn't bring the kernel into a
1061 dangerous state. */ 1047 dangerous state. */
1062 if (tmp.num_counters && 1048 if (repl->num_counters &&
1063 copy_to_user(tmp.counters, counterstmp, 1049 copy_to_user(repl->counters, counterstmp,
1064 tmp.num_counters * sizeof(struct ebt_counter))) { 1050 repl->num_counters * sizeof(struct ebt_counter))) {
1065 BUGPRINT("Couldn't copy counters to userspace\n");
1066 ret = -EFAULT; 1051 ret = -EFAULT;
1067 } 1052 }
1068 else 1053 else
@@ -1070,7 +1055,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
1070 1055
1071 /* decrease module count and free resources */ 1056 /* decrease module count and free resources */
1072 EBT_ENTRY_ITERATE(table->entries, table->entries_size, 1057 EBT_ENTRY_ITERATE(table->entries, table->entries_size,
1073 ebt_cleanup_entry, NULL); 1058 ebt_cleanup_entry, net, NULL);
1074 1059
1075 vfree(table->entries); 1060 vfree(table->entries);
1076 if (table->chainstack) { 1061 if (table->chainstack) {
@@ -1087,7 +1072,7 @@ free_unlock:
1087 mutex_unlock(&ebt_mutex); 1072 mutex_unlock(&ebt_mutex);
1088free_iterate: 1073free_iterate:
1089 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 1074 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1090 ebt_cleanup_entry, NULL); 1075 ebt_cleanup_entry, net, NULL);
1091free_counterstmp: 1076free_counterstmp:
1092 vfree(counterstmp); 1077 vfree(counterstmp);
1093 /* can be initialized in translate_table() */ 1078 /* can be initialized in translate_table() */
@@ -1096,6 +1081,59 @@ free_counterstmp:
1096 vfree(newinfo->chainstack[i]); 1081 vfree(newinfo->chainstack[i]);
1097 vfree(newinfo->chainstack); 1082 vfree(newinfo->chainstack);
1098 } 1083 }
1084 return ret;
1085}
1086
1087/* replace the table */
1088static int do_replace(struct net *net, const void __user *user,
1089 unsigned int len)
1090{
1091 int ret, countersize;
1092 struct ebt_table_info *newinfo;
1093 struct ebt_replace tmp;
1094
1095 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1096 return -EFAULT;
1097
1098 if (len != sizeof(tmp) + tmp.entries_size) {
1099 BUGPRINT("Wrong len argument\n");
1100 return -EINVAL;
1101 }
1102
1103 if (tmp.entries_size == 0) {
1104 BUGPRINT("Entries_size never zero\n");
1105 return -EINVAL;
1106 }
1107 /* overflow check */
1108 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1109 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1110 return -ENOMEM;
1111 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1112 return -ENOMEM;
1113
1114 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1115 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1116 if (!newinfo)
1117 return -ENOMEM;
1118
1119 if (countersize)
1120 memset(newinfo->counters, 0, countersize);
1121
1122 newinfo->entries = vmalloc(tmp.entries_size);
1123 if (!newinfo->entries) {
1124 ret = -ENOMEM;
1125 goto free_newinfo;
1126 }
1127 if (copy_from_user(
1128 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1129 BUGPRINT("Couldn't copy entries from userspace\n");
1130 ret = -EFAULT;
1131 goto free_entries;
1132 }
1133
1134 ret = do_replace_finish(net, &tmp, newinfo);
1135 if (ret == 0)
1136 return ret;
1099free_entries: 1137free_entries:
1100 vfree(newinfo->entries); 1138 vfree(newinfo->entries);
1101free_newinfo: 1139free_newinfo:
@@ -1154,7 +1192,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table)
1154 newinfo->hook_entry[i] = p + 1192 newinfo->hook_entry[i] = p +
1155 ((char *)repl->hook_entry[i] - repl->entries); 1193 ((char *)repl->hook_entry[i] - repl->entries);
1156 } 1194 }
1157 ret = translate_table(repl->name, newinfo); 1195 ret = translate_table(net, repl->name, newinfo);
1158 if (ret != 0) { 1196 if (ret != 0) {
1159 BUGPRINT("Translate_table failed\n"); 1197 BUGPRINT("Translate_table failed\n");
1160 goto free_chainstack; 1198 goto free_chainstack;
@@ -1204,7 +1242,7 @@ out:
1204 return ERR_PTR(ret); 1242 return ERR_PTR(ret);
1205} 1243}
1206 1244
1207void ebt_unregister_table(struct ebt_table *table) 1245void ebt_unregister_table(struct net *net, struct ebt_table *table)
1208{ 1246{
1209 int i; 1247 int i;
1210 1248
@@ -1216,7 +1254,7 @@ void ebt_unregister_table(struct ebt_table *table)
1216 list_del(&table->list); 1254 list_del(&table->list);
1217 mutex_unlock(&ebt_mutex); 1255 mutex_unlock(&ebt_mutex);
1218 EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size, 1256 EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1219 ebt_cleanup_entry, NULL); 1257 ebt_cleanup_entry, net, NULL);
1220 if (table->private->nentries) 1258 if (table->private->nentries)
1221 module_put(table->me); 1259 module_put(table->me);
1222 vfree(table->private->entries); 1260 vfree(table->private->entries);
@@ -1230,39 +1268,33 @@ void ebt_unregister_table(struct ebt_table *table)
1230} 1268}
1231 1269
1232/* userspace just supplied us with counters */ 1270/* userspace just supplied us with counters */
1233static int update_counters(struct net *net, void __user *user, unsigned int len) 1271static int do_update_counters(struct net *net, const char *name,
1272 struct ebt_counter __user *counters,
1273 unsigned int num_counters,
1274 const void __user *user, unsigned int len)
1234{ 1275{
1235 int i, ret; 1276 int i, ret;
1236 struct ebt_counter *tmp; 1277 struct ebt_counter *tmp;
1237 struct ebt_replace hlp;
1238 struct ebt_table *t; 1278 struct ebt_table *t;
1239 1279
1240 if (copy_from_user(&hlp, user, sizeof(hlp))) 1280 if (num_counters == 0)
1241 return -EFAULT;
1242
1243 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1244 return -EINVAL;
1245 if (hlp.num_counters == 0)
1246 return -EINVAL; 1281 return -EINVAL;
1247 1282
1248 if (!(tmp = vmalloc(hlp.num_counters * sizeof(*tmp)))) { 1283 tmp = vmalloc(num_counters * sizeof(*tmp));
1249 MEMPRINT("Update_counters && nomemory\n"); 1284 if (!tmp)
1250 return -ENOMEM; 1285 return -ENOMEM;
1251 }
1252 1286
1253 t = find_table_lock(net, hlp.name, &ret, &ebt_mutex); 1287 t = find_table_lock(net, name, &ret, &ebt_mutex);
1254 if (!t) 1288 if (!t)
1255 goto free_tmp; 1289 goto free_tmp;
1256 1290
1257 if (hlp.num_counters != t->private->nentries) { 1291 if (num_counters != t->private->nentries) {
1258 BUGPRINT("Wrong nr of counters\n"); 1292 BUGPRINT("Wrong nr of counters\n");
1259 ret = -EINVAL; 1293 ret = -EINVAL;
1260 goto unlock_mutex; 1294 goto unlock_mutex;
1261 } 1295 }
1262 1296
1263 if ( copy_from_user(tmp, hlp.counters, 1297 if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1264 hlp.num_counters * sizeof(struct ebt_counter)) ) {
1265 BUGPRINT("Updata_counters && !cfu\n");
1266 ret = -EFAULT; 1298 ret = -EFAULT;
1267 goto unlock_mutex; 1299 goto unlock_mutex;
1268 } 1300 }
@@ -1271,7 +1303,7 @@ static int update_counters(struct net *net, void __user *user, unsigned int len)
1271 write_lock_bh(&t->lock); 1303 write_lock_bh(&t->lock);
1272 1304
1273 /* we add to the counters of the first cpu */ 1305 /* we add to the counters of the first cpu */
1274 for (i = 0; i < hlp.num_counters; i++) { 1306 for (i = 0; i < num_counters; i++) {
1275 t->private->counters[i].pcnt += tmp[i].pcnt; 1307 t->private->counters[i].pcnt += tmp[i].pcnt;
1276 t->private->counters[i].bcnt += tmp[i].bcnt; 1308 t->private->counters[i].bcnt += tmp[i].bcnt;
1277 } 1309 }
@@ -1285,8 +1317,23 @@ free_tmp:
1285 return ret; 1317 return ret;
1286} 1318}
1287 1319
1288static inline int ebt_make_matchname(struct ebt_entry_match *m, 1320static int update_counters(struct net *net, const void __user *user,
1289 char *base, char __user *ubase) 1321 unsigned int len)
1322{
1323 struct ebt_replace hlp;
1324
1325 if (copy_from_user(&hlp, user, sizeof(hlp)))
1326 return -EFAULT;
1327
1328 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1329 return -EINVAL;
1330
1331 return do_update_counters(net, hlp.name, hlp.counters,
1332 hlp.num_counters, user, len);
1333}
1334
1335static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1336 const char *base, char __user *ubase)
1290{ 1337{
1291 char __user *hlp = ubase + ((char *)m - base); 1338 char __user *hlp = ubase + ((char *)m - base);
1292 if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN)) 1339 if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
@@ -1294,8 +1341,8 @@ static inline int ebt_make_matchname(struct ebt_entry_match *m,
1294 return 0; 1341 return 0;
1295} 1342}
1296 1343
1297static inline int ebt_make_watchername(struct ebt_entry_watcher *w, 1344static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1298 char *base, char __user *ubase) 1345 const char *base, char __user *ubase)
1299{ 1346{
1300 char __user *hlp = ubase + ((char *)w - base); 1347 char __user *hlp = ubase + ((char *)w - base);
1301 if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN)) 1348 if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
@@ -1303,11 +1350,12 @@ static inline int ebt_make_watchername(struct ebt_entry_watcher *w,
1303 return 0; 1350 return 0;
1304} 1351}
1305 1352
1306static inline int ebt_make_names(struct ebt_entry *e, char *base, char __user *ubase) 1353static inline int
1354ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1307{ 1355{
1308 int ret; 1356 int ret;
1309 char __user *hlp; 1357 char __user *hlp;
1310 struct ebt_entry_target *t; 1358 const struct ebt_entry_target *t;
1311 1359
1312 if (e->bitmask == 0) 1360 if (e->bitmask == 0)
1313 return 0; 1361 return 0;
@@ -1326,13 +1374,46 @@ static inline int ebt_make_names(struct ebt_entry *e, char *base, char __user *u
1326 return 0; 1374 return 0;
1327} 1375}
1328 1376
1377static int copy_counters_to_user(struct ebt_table *t,
1378 const struct ebt_counter *oldcounters,
1379 void __user *user, unsigned int num_counters,
1380 unsigned int nentries)
1381{
1382 struct ebt_counter *counterstmp;
1383 int ret = 0;
1384
1385 /* userspace might not need the counters */
1386 if (num_counters == 0)
1387 return 0;
1388
1389 if (num_counters != nentries) {
1390 BUGPRINT("Num_counters wrong\n");
1391 return -EINVAL;
1392 }
1393
1394 counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1395 if (!counterstmp)
1396 return -ENOMEM;
1397
1398 write_lock_bh(&t->lock);
1399 get_counters(oldcounters, counterstmp, nentries);
1400 write_unlock_bh(&t->lock);
1401
1402 if (copy_to_user(user, counterstmp,
1403 nentries * sizeof(struct ebt_counter)))
1404 ret = -EFAULT;
1405 vfree(counterstmp);
1406 return ret;
1407}
1408
1329/* called with ebt_mutex locked */ 1409/* called with ebt_mutex locked */
1330static int copy_everything_to_user(struct ebt_table *t, void __user *user, 1410static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1331 int *len, int cmd) 1411 const int *len, int cmd)
1332{ 1412{
1333 struct ebt_replace tmp; 1413 struct ebt_replace tmp;
1334 struct ebt_counter *counterstmp, *oldcounters; 1414 const struct ebt_counter *oldcounters;
1335 unsigned int entries_size, nentries; 1415 unsigned int entries_size, nentries;
1416 int ret;
1336 char *entries; 1417 char *entries;
1337 1418
1338 if (cmd == EBT_SO_GET_ENTRIES) { 1419 if (cmd == EBT_SO_GET_ENTRIES) {
@@ -1347,16 +1428,12 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1347 oldcounters = t->table->counters; 1428 oldcounters = t->table->counters;
1348 } 1429 }
1349 1430
1350 if (copy_from_user(&tmp, user, sizeof(tmp))) { 1431 if (copy_from_user(&tmp, user, sizeof(tmp)))
1351 BUGPRINT("Cfu didn't work\n");
1352 return -EFAULT; 1432 return -EFAULT;
1353 }
1354 1433
1355 if (*len != sizeof(struct ebt_replace) + entries_size + 1434 if (*len != sizeof(struct ebt_replace) + entries_size +
1356 (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0)) { 1435 (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0))
1357 BUGPRINT("Wrong size\n");
1358 return -EINVAL; 1436 return -EINVAL;
1359 }
1360 1437
1361 if (tmp.nentries != nentries) { 1438 if (tmp.nentries != nentries) {
1362 BUGPRINT("Nentries wrong\n"); 1439 BUGPRINT("Nentries wrong\n");
@@ -1368,29 +1445,10 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1368 return -EINVAL; 1445 return -EINVAL;
1369 } 1446 }
1370 1447
1371 /* userspace might not need the counters */ 1448 ret = copy_counters_to_user(t, oldcounters, tmp.counters,
1372 if (tmp.num_counters) { 1449 tmp.num_counters, nentries);
1373 if (tmp.num_counters != nentries) { 1450 if (ret)
1374 BUGPRINT("Num_counters wrong\n"); 1451 return ret;
1375 return -EINVAL;
1376 }
1377 counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1378 if (!counterstmp) {
1379 MEMPRINT("Couldn't copy counters, out of memory\n");
1380 return -ENOMEM;
1381 }
1382 write_lock_bh(&t->lock);
1383 get_counters(oldcounters, counterstmp, nentries);
1384 write_unlock_bh(&t->lock);
1385
1386 if (copy_to_user(tmp.counters, counterstmp,
1387 nentries * sizeof(struct ebt_counter))) {
1388 BUGPRINT("Couldn't copy counters to userspace\n");
1389 vfree(counterstmp);
1390 return -EFAULT;
1391 }
1392 vfree(counterstmp);
1393 }
1394 1452
1395 if (copy_to_user(tmp.entries, entries, entries_size)) { 1453 if (copy_to_user(tmp.entries, entries, entries_size)) {
1396 BUGPRINT("Couldn't copy entries to userspace\n"); 1454 BUGPRINT("Couldn't copy entries to userspace\n");
@@ -1418,7 +1476,7 @@ static int do_ebt_set_ctl(struct sock *sk,
1418 break; 1476 break;
1419 default: 1477 default:
1420 ret = -EINVAL; 1478 ret = -EINVAL;
1421 } 1479 }
1422 return ret; 1480 return ret;
1423} 1481}
1424 1482
@@ -1478,15 +1536,892 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1478 return ret; 1536 return ret;
1479} 1537}
1480 1538
1539#ifdef CONFIG_COMPAT
1540/* 32 bit-userspace compatibility definitions. */
1541struct compat_ebt_replace {
1542 char name[EBT_TABLE_MAXNAMELEN];
1543 compat_uint_t valid_hooks;
1544 compat_uint_t nentries;
1545 compat_uint_t entries_size;
1546 /* start of the chains */
1547 compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
1548 /* nr of counters userspace expects back */
1549 compat_uint_t num_counters;
1550 /* where the kernel will put the old counters. */
1551 compat_uptr_t counters;
1552 compat_uptr_t entries;
1553};
1554
1555/* struct ebt_entry_match, _target and _watcher have same layout */
1556struct compat_ebt_entry_mwt {
1557 union {
1558 char name[EBT_FUNCTION_MAXNAMELEN];
1559 compat_uptr_t ptr;
1560 } u;
1561 compat_uint_t match_size;
1562 compat_uint_t data[0];
1563};
1564
1565/* account for possible padding between match_size and ->data */
1566static int ebt_compat_entry_padsize(void)
1567{
1568 BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
1569 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
1570 return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
1571 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
1572}
1573
1574static int ebt_compat_match_offset(const struct xt_match *match,
1575 unsigned int userlen)
1576{
1577 /*
1578 * ebt_among needs special handling. The kernel .matchsize is
1579 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1580 * value is expected.
1581 * Example: userspace sends 4500, ebt_among.c wants 4504.
1582 */
1583 if (unlikely(match->matchsize == -1))
1584 return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1585 return xt_compat_match_offset(match);
1586}
1587
1588static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1589 unsigned int *size)
1590{
1591 const struct xt_match *match = m->u.match;
1592 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1593 int off = ebt_compat_match_offset(match, m->match_size);
1594 compat_uint_t msize = m->match_size - off;
1595
1596 BUG_ON(off >= m->match_size);
1597
1598 if (copy_to_user(cm->u.name, match->name,
1599 strlen(match->name) + 1) || put_user(msize, &cm->match_size))
1600 return -EFAULT;
1601
1602 if (match->compat_to_user) {
1603 if (match->compat_to_user(cm->data, m->data))
1604 return -EFAULT;
1605 } else if (copy_to_user(cm->data, m->data, msize))
1606 return -EFAULT;
1607
1608 *size -= ebt_compat_entry_padsize() + off;
1609 *dstptr = cm->data;
1610 *dstptr += msize;
1611 return 0;
1612}
1613
1614static int compat_target_to_user(struct ebt_entry_target *t,
1615 void __user **dstptr,
1616 unsigned int *size)
1617{
1618 const struct xt_target *target = t->u.target;
1619 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1620 int off = xt_compat_target_offset(target);
1621 compat_uint_t tsize = t->target_size - off;
1622
1623 BUG_ON(off >= t->target_size);
1624
1625 if (copy_to_user(cm->u.name, target->name,
1626 strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
1627 return -EFAULT;
1628
1629 if (target->compat_to_user) {
1630 if (target->compat_to_user(cm->data, t->data))
1631 return -EFAULT;
1632 } else if (copy_to_user(cm->data, t->data, tsize))
1633 return -EFAULT;
1634
1635 *size -= ebt_compat_entry_padsize() + off;
1636 *dstptr = cm->data;
1637 *dstptr += tsize;
1638 return 0;
1639}
1640
1641static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1642 void __user **dstptr,
1643 unsigned int *size)
1644{
1645 return compat_target_to_user((struct ebt_entry_target *)w,
1646 dstptr, size);
1647}
1648
1649static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
1650 unsigned int *size)
1651{
1652 struct ebt_entry_target *t;
1653 struct ebt_entry __user *ce;
1654 u32 watchers_offset, target_offset, next_offset;
1655 compat_uint_t origsize;
1656 int ret;
1657
1658 if (e->bitmask == 0) {
1659 if (*size < sizeof(struct ebt_entries))
1660 return -EINVAL;
1661 if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
1662 return -EFAULT;
1663
1664 *dstptr += sizeof(struct ebt_entries);
1665 *size -= sizeof(struct ebt_entries);
1666 return 0;
1667 }
1668
1669 if (*size < sizeof(*ce))
1670 return -EINVAL;
1671
1672 ce = (struct ebt_entry __user *)*dstptr;
1673 if (copy_to_user(ce, e, sizeof(*ce)))
1674 return -EFAULT;
1675
1676 origsize = *size;
1677 *dstptr += sizeof(*ce);
1678
1679 ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
1680 if (ret)
1681 return ret;
1682 watchers_offset = e->watchers_offset - (origsize - *size);
1683
1684 ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
1685 if (ret)
1686 return ret;
1687 target_offset = e->target_offset - (origsize - *size);
1688
1689 t = (struct ebt_entry_target *) ((char *) e + e->target_offset);
1690
1691 ret = compat_target_to_user(t, dstptr, size);
1692 if (ret)
1693 return ret;
1694 next_offset = e->next_offset - (origsize - *size);
1695
1696 if (put_user(watchers_offset, &ce->watchers_offset) ||
1697 put_user(target_offset, &ce->target_offset) ||
1698 put_user(next_offset, &ce->next_offset))
1699 return -EFAULT;
1700
1701 *size -= sizeof(*ce);
1702 return 0;
1703}
1704
1705static int compat_calc_match(struct ebt_entry_match *m, int *off)
1706{
1707 *off += ebt_compat_match_offset(m->u.match, m->match_size);
1708 *off += ebt_compat_entry_padsize();
1709 return 0;
1710}
1711
1712static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1713{
1714 *off += xt_compat_target_offset(w->u.watcher);
1715 *off += ebt_compat_entry_padsize();
1716 return 0;
1717}
1718
1719static int compat_calc_entry(const struct ebt_entry *e,
1720 const struct ebt_table_info *info,
1721 const void *base,
1722 struct compat_ebt_replace *newinfo)
1723{
1724 const struct ebt_entry_target *t;
1725 unsigned int entry_offset;
1726 int off, ret, i;
1727
1728 if (e->bitmask == 0)
1729 return 0;
1730
1731 off = 0;
1732 entry_offset = (void *)e - base;
1733
1734 EBT_MATCH_ITERATE(e, compat_calc_match, &off);
1735 EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);
1736
1737 t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);
1738
1739 off += xt_compat_target_offset(t->u.target);
1740 off += ebt_compat_entry_padsize();
1741
1742 newinfo->entries_size -= off;
1743
1744 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
1745 if (ret)
1746 return ret;
1747
1748 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1749 const void *hookptr = info->hook_entry[i];
1750 if (info->hook_entry[i] &&
1751 (e < (struct ebt_entry *)(base - hookptr))) {
1752 newinfo->hook_entry[i] -= off;
1753 pr_debug("0x%08X -> 0x%08X\n",
1754 newinfo->hook_entry[i] + off,
1755 newinfo->hook_entry[i]);
1756 }
1757 }
1758
1759 return 0;
1760}
1761
1762
1763static int compat_table_info(const struct ebt_table_info *info,
1764 struct compat_ebt_replace *newinfo)
1765{
1766 unsigned int size = info->entries_size;
1767 const void *entries = info->entries;
1768
1769 newinfo->entries_size = size;
1770
1771 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1772 entries, newinfo);
1773}
1774
1775static int compat_copy_everything_to_user(struct ebt_table *t,
1776 void __user *user, int *len, int cmd)
1777{
1778 struct compat_ebt_replace repl, tmp;
1779 struct ebt_counter *oldcounters;
1780 struct ebt_table_info tinfo;
1781 int ret;
1782 void __user *pos;
1783
1784 memset(&tinfo, 0, sizeof(tinfo));
1785
1786 if (cmd == EBT_SO_GET_ENTRIES) {
1787 tinfo.entries_size = t->private->entries_size;
1788 tinfo.nentries = t->private->nentries;
1789 tinfo.entries = t->private->entries;
1790 oldcounters = t->private->counters;
1791 } else {
1792 tinfo.entries_size = t->table->entries_size;
1793 tinfo.nentries = t->table->nentries;
1794 tinfo.entries = t->table->entries;
1795 oldcounters = t->table->counters;
1796 }
1797
1798 if (copy_from_user(&tmp, user, sizeof(tmp)))
1799 return -EFAULT;
1800
1801 if (tmp.nentries != tinfo.nentries ||
1802 (tmp.num_counters && tmp.num_counters != tinfo.nentries))
1803 return -EINVAL;
1804
1805 memcpy(&repl, &tmp, sizeof(repl));
1806 if (cmd == EBT_SO_GET_ENTRIES)
1807 ret = compat_table_info(t->private, &repl);
1808 else
1809 ret = compat_table_info(&tinfo, &repl);
1810 if (ret)
1811 return ret;
1812
1813 if (*len != sizeof(tmp) + repl.entries_size +
1814 (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
1815 pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
1816 *len, tinfo.entries_size, repl.entries_size);
1817 return -EINVAL;
1818 }
1819
1820 /* userspace might not need the counters */
1821 ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
1822 tmp.num_counters, tinfo.nentries);
1823 if (ret)
1824 return ret;
1825
1826 pos = compat_ptr(tmp.entries);
1827 return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
1828 compat_copy_entry_to_user, &pos, &tmp.entries_size);
1829}
1830
1831struct ebt_entries_buf_state {
1832 char *buf_kern_start; /* kernel buffer to copy (translated) data to */
1833 u32 buf_kern_len; /* total size of kernel buffer */
1834 u32 buf_kern_offset; /* amount of data copied so far */
1835 u32 buf_user_offset; /* read position in userspace buffer */
1836};
1837
1838static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1839{
1840 state->buf_kern_offset += sz;
1841 return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1842}
1843
1844static int ebt_buf_add(struct ebt_entries_buf_state *state,
1845 void *data, unsigned int sz)
1846{
1847 if (state->buf_kern_start == NULL)
1848 goto count_only;
1849
1850 BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
1851
1852 memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1853
1854 count_only:
1855 state->buf_user_offset += sz;
1856 return ebt_buf_count(state, sz);
1857}
1858
1859static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1860{
1861 char *b = state->buf_kern_start;
1862
1863 BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
1864
1865 if (b != NULL && sz > 0)
1866 memset(b + state->buf_kern_offset, 0, sz);
1867 /* do not adjust ->buf_user_offset here, we added kernel-side padding */
1868 return ebt_buf_count(state, sz);
1869}
1870
1871enum compat_mwt {
1872 EBT_COMPAT_MATCH,
1873 EBT_COMPAT_WATCHER,
1874 EBT_COMPAT_TARGET,
1875};
1876
1877static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1878 enum compat_mwt compat_mwt,
1879 struct ebt_entries_buf_state *state,
1880 const unsigned char *base)
1881{
1882 char name[EBT_FUNCTION_MAXNAMELEN];
1883 struct xt_match *match;
1884 struct xt_target *wt;
1885 void *dst = NULL;
1886 int off, pad = 0, ret = 0;
1887 unsigned int size_kern, entry_offset, match_size = mwt->match_size;
1888
1889 strlcpy(name, mwt->u.name, sizeof(name));
1890
1891 if (state->buf_kern_start)
1892 dst = state->buf_kern_start + state->buf_kern_offset;
1893
1894 entry_offset = (unsigned char *) mwt - base;
1895 switch (compat_mwt) {
1896 case EBT_COMPAT_MATCH:
1897 match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
1898 name, 0), "ebt_%s", name);
1899 if (match == NULL)
1900 return -ENOENT;
1901 if (IS_ERR(match))
1902 return PTR_ERR(match);
1903
1904 off = ebt_compat_match_offset(match, match_size);
1905 if (dst) {
1906 if (match->compat_from_user)
1907 match->compat_from_user(dst, mwt->data);
1908 else
1909 memcpy(dst, mwt->data, match_size);
1910 }
1911
1912 size_kern = match->matchsize;
1913 if (unlikely(size_kern == -1))
1914 size_kern = match_size;
1915 module_put(match->me);
1916 break;
1917 case EBT_COMPAT_WATCHER: /* fallthrough */
1918 case EBT_COMPAT_TARGET:
1919 wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
1920 name, 0), "ebt_%s", name);
1921 if (wt == NULL)
1922 return -ENOENT;
1923 if (IS_ERR(wt))
1924 return PTR_ERR(wt);
1925 off = xt_compat_target_offset(wt);
1926
1927 if (dst) {
1928 if (wt->compat_from_user)
1929 wt->compat_from_user(dst, mwt->data);
1930 else
1931 memcpy(dst, mwt->data, match_size);
1932 }
1933
1934 size_kern = wt->targetsize;
1935 module_put(wt->me);
1936 break;
1937 }
1938
1939 if (!dst) {
1940 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset,
1941 off + ebt_compat_entry_padsize());
1942 if (ret < 0)
1943 return ret;
1944 }
1945
1946 state->buf_kern_offset += match_size + off;
1947 state->buf_user_offset += match_size;
1948 pad = XT_ALIGN(size_kern) - size_kern;
1949
1950 if (pad > 0 && dst) {
1951 BUG_ON(state->buf_kern_len <= pad);
1952 BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
1953 memset(dst + size_kern, 0, pad);
1954 }
1955 return off + match_size;
1956}
1957
1958/*
1959 * return size of all matches, watchers or target, including necessary
1960 * alignment and padding.
1961 */
1962static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
1963 unsigned int size_left, enum compat_mwt type,
1964 struct ebt_entries_buf_state *state, const void *base)
1965{
1966 int growth = 0;
1967 char *buf;
1968
1969 if (size_left == 0)
1970 return 0;
1971
1972 buf = (char *) match32;
1973
1974 while (size_left >= sizeof(*match32)) {
1975 struct ebt_entry_match *match_kern;
1976 int ret;
1977
1978 match_kern = (struct ebt_entry_match *) state->buf_kern_start;
1979 if (match_kern) {
1980 char *tmp;
1981 tmp = state->buf_kern_start + state->buf_kern_offset;
1982 match_kern = (struct ebt_entry_match *) tmp;
1983 }
1984 ret = ebt_buf_add(state, buf, sizeof(*match32));
1985 if (ret < 0)
1986 return ret;
1987 size_left -= sizeof(*match32);
1988
1989 /* add padding before match->data (if any) */
1990 ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
1991 if (ret < 0)
1992 return ret;
1993
1994 if (match32->match_size > size_left)
1995 return -EINVAL;
1996
1997 size_left -= match32->match_size;
1998
1999 ret = compat_mtw_from_user(match32, type, state, base);
2000 if (ret < 0)
2001 return ret;
2002
2003 BUG_ON(ret < match32->match_size);
2004 growth += ret - match32->match_size;
2005 growth += ebt_compat_entry_padsize();
2006
2007 buf += sizeof(*match32);
2008 buf += match32->match_size;
2009
2010 if (match_kern)
2011 match_kern->match_size = ret;
2012
2013 WARN_ON(type == EBT_COMPAT_TARGET && size_left);
2014 match32 = (struct compat_ebt_entry_mwt *) buf;
2015 }
2016
2017 return growth;
2018}
2019
2020#define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...) \
2021({ \
2022 unsigned int __i; \
2023 int __ret = 0; \
2024 struct compat_ebt_entry_mwt *__watcher; \
2025 \
2026 for (__i = e->watchers_offset; \
2027 __i < (e)->target_offset; \
2028 __i += __watcher->watcher_size + \
2029 sizeof(struct compat_ebt_entry_mwt)) { \
2030 __watcher = (void *)(e) + __i; \
2031 __ret = fn(__watcher , ## args); \
2032 if (__ret != 0) \
2033 break; \
2034 } \
2035 if (__ret == 0) { \
2036 if (__i != (e)->target_offset) \
2037 __ret = -EINVAL; \
2038 } \
2039 __ret; \
2040})
2041
2042#define EBT_COMPAT_MATCH_ITERATE(e, fn, args...) \
2043({ \
2044 unsigned int __i; \
2045 int __ret = 0; \
2046 struct compat_ebt_entry_mwt *__match; \
2047 \
2048 for (__i = sizeof(struct ebt_entry); \
2049 __i < (e)->watchers_offset; \
2050 __i += __match->match_size + \
2051 sizeof(struct compat_ebt_entry_mwt)) { \
2052 __match = (void *)(e) + __i; \
2053 __ret = fn(__match , ## args); \
2054 if (__ret != 0) \
2055 break; \
2056 } \
2057 if (__ret == 0) { \
2058 if (__i != (e)->watchers_offset) \
2059 __ret = -EINVAL; \
2060 } \
2061 __ret; \
2062})
2063
2064/* called for all ebt_entry structures. */
2065static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2066 unsigned int *total,
2067 struct ebt_entries_buf_state *state)
2068{
2069 unsigned int i, j, startoff, new_offset = 0;
2070 /* stores match/watchers/targets & offset of next struct ebt_entry: */
2071 unsigned int offsets[4];
2072 unsigned int *offsets_update = NULL;
2073 int ret;
2074 char *buf_start;
2075
2076 if (*total < sizeof(struct ebt_entries))
2077 return -EINVAL;
2078
2079 if (!entry->bitmask) {
2080 *total -= sizeof(struct ebt_entries);
2081 return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
2082 }
2083 if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
2084 return -EINVAL;
2085
2086 startoff = state->buf_user_offset;
2087 /* pull in most part of ebt_entry, it does not need to be changed. */
2088 ret = ebt_buf_add(state, entry,
2089 offsetof(struct ebt_entry, watchers_offset));
2090 if (ret < 0)
2091 return ret;
2092
2093 offsets[0] = sizeof(struct ebt_entry); /* matches come first */
2094 memcpy(&offsets[1], &entry->watchers_offset,
2095 sizeof(offsets) - sizeof(offsets[0]));
2096
2097 if (state->buf_kern_start) {
2098 buf_start = state->buf_kern_start + state->buf_kern_offset;
2099 offsets_update = (unsigned int *) buf_start;
2100 }
2101 ret = ebt_buf_add(state, &offsets[1],
2102 sizeof(offsets) - sizeof(offsets[0]));
2103 if (ret < 0)
2104 return ret;
2105 buf_start = (char *) entry;
2106 /*
2107 * 0: matches offset, always follows ebt_entry.
2108 * 1: watchers offset, from ebt_entry structure
2109 * 2: target offset, from ebt_entry structure
2110 * 3: next ebt_entry offset, from ebt_entry structure
2111 *
2112 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2113 */
2114 for (i = 0, j = 1 ; j < 4 ; j++, i++) {
2115 struct compat_ebt_entry_mwt *match32;
2116 unsigned int size;
2117 char *buf = buf_start;
2118
2119 buf = buf_start + offsets[i];
2120 if (offsets[i] > offsets[j])
2121 return -EINVAL;
2122
2123 match32 = (struct compat_ebt_entry_mwt *) buf;
2124 size = offsets[j] - offsets[i];
2125 ret = ebt_size_mwt(match32, size, i, state, base);
2126 if (ret < 0)
2127 return ret;
2128 new_offset += ret;
2129 if (offsets_update && new_offset) {
2130 pr_debug("ebtables: change offset %d to %d\n",
2131 offsets_update[i], offsets[j] + new_offset);
2132 offsets_update[i] = offsets[j] + new_offset;
2133 }
2134 }
2135
2136 startoff = state->buf_user_offset - startoff;
2137
2138 BUG_ON(*total < startoff);
2139 *total -= startoff;
2140 return 0;
2141}
2142
2143/*
2144 * repl->entries_size is the size of the ebt_entry blob in userspace.
2145 * It might need more memory when copied to a 64 bit kernel in case
2146 * userspace is 32-bit. So, first task: find out how much memory is needed.
2147 *
2148 * Called before validation is performed.
2149 */
2150static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2151 struct ebt_entries_buf_state *state)
2152{
2153 unsigned int size_remaining = size_user;
2154 int ret;
2155
2156 ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2157 &size_remaining, state);
2158 if (ret < 0)
2159 return ret;
2160
2161 WARN_ON(size_remaining);
2162 return state->buf_kern_offset;
2163}
2164
2165
2166static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
2167 void __user *user, unsigned int len)
2168{
2169 struct compat_ebt_replace tmp;
2170 int i;
2171
2172 if (len < sizeof(tmp))
2173 return -EINVAL;
2174
2175 if (copy_from_user(&tmp, user, sizeof(tmp)))
2176 return -EFAULT;
2177
2178 if (len != sizeof(tmp) + tmp.entries_size)
2179 return -EINVAL;
2180
2181 if (tmp.entries_size == 0)
2182 return -EINVAL;
2183
2184 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
2185 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
2186 return -ENOMEM;
2187 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
2188 return -ENOMEM;
2189
2190 memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
2191
2192 /* starting with hook_entry, 32 vs. 64 bit structures are different */
2193 for (i = 0; i < NF_BR_NUMHOOKS; i++)
2194 repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
2195
2196 repl->num_counters = tmp.num_counters;
2197 repl->counters = compat_ptr(tmp.counters);
2198 repl->entries = compat_ptr(tmp.entries);
2199 return 0;
2200}
2201
2202static int compat_do_replace(struct net *net, void __user *user,
2203 unsigned int len)
2204{
2205 int ret, i, countersize, size64;
2206 struct ebt_table_info *newinfo;
2207 struct ebt_replace tmp;
2208 struct ebt_entries_buf_state state;
2209 void *entries_tmp;
2210
2211 ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
2212 if (ret) {
2213 /* try real handler in case userland supplied needed padding */
2214 if (ret == -EINVAL && do_replace(net, user, len) == 0)
2215 ret = 0;
2216 return ret;
2217 }
2218
2219 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2220 newinfo = vmalloc(sizeof(*newinfo) + countersize);
2221 if (!newinfo)
2222 return -ENOMEM;
2223
2224 if (countersize)
2225 memset(newinfo->counters, 0, countersize);
2226
2227 memset(&state, 0, sizeof(state));
2228
2229 newinfo->entries = vmalloc(tmp.entries_size);
2230 if (!newinfo->entries) {
2231 ret = -ENOMEM;
2232 goto free_newinfo;
2233 }
2234 if (copy_from_user(
2235 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
2236 ret = -EFAULT;
2237 goto free_entries;
2238 }
2239
2240 entries_tmp = newinfo->entries;
2241
2242 xt_compat_lock(NFPROTO_BRIDGE);
2243
2244 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2245 if (ret < 0)
2246 goto out_unlock;
2247
2248 pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2249 tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
2250 xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
2251
2252 size64 = ret;
2253 newinfo->entries = vmalloc(size64);
2254 if (!newinfo->entries) {
2255 vfree(entries_tmp);
2256 ret = -ENOMEM;
2257 goto out_unlock;
2258 }
2259
2260 memset(&state, 0, sizeof(state));
2261 state.buf_kern_start = newinfo->entries;
2262 state.buf_kern_len = size64;
2263
2264 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2265 BUG_ON(ret < 0); /* parses same data again */
2266
2267 vfree(entries_tmp);
2268 tmp.entries_size = size64;
2269
2270 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
2271 char __user *usrptr;
2272 if (tmp.hook_entry[i]) {
2273 unsigned int delta;
2274 usrptr = (char __user *) tmp.hook_entry[i];
2275 delta = usrptr - tmp.entries;
2276 usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
2277 tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
2278 }
2279 }
2280
2281 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2282 xt_compat_unlock(NFPROTO_BRIDGE);
2283
2284 ret = do_replace_finish(net, &tmp, newinfo);
2285 if (ret == 0)
2286 return ret;
2287free_entries:
2288 vfree(newinfo->entries);
2289free_newinfo:
2290 vfree(newinfo);
2291 return ret;
2292out_unlock:
2293 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2294 xt_compat_unlock(NFPROTO_BRIDGE);
2295 goto free_entries;
2296}
2297
2298static int compat_update_counters(struct net *net, void __user *user,
2299 unsigned int len)
2300{
2301 struct compat_ebt_replace hlp;
2302
2303 if (copy_from_user(&hlp, user, sizeof(hlp)))
2304 return -EFAULT;
2305
2306 /* try real handler in case userland supplied needed padding */
2307 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2308 return update_counters(net, user, len);
2309
2310 return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2311 hlp.num_counters, user, len);
2312}
2313
2314static int compat_do_ebt_set_ctl(struct sock *sk,
2315 int cmd, void __user *user, unsigned int len)
2316{
2317 int ret;
2318
2319 if (!capable(CAP_NET_ADMIN))
2320 return -EPERM;
2321
2322 switch (cmd) {
2323 case EBT_SO_SET_ENTRIES:
2324 ret = compat_do_replace(sock_net(sk), user, len);
2325 break;
2326 case EBT_SO_SET_COUNTERS:
2327 ret = compat_update_counters(sock_net(sk), user, len);
2328 break;
2329 default:
2330 ret = -EINVAL;
2331 }
2332 return ret;
2333}
2334
2335static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2336 void __user *user, int *len)
2337{
2338 int ret;
2339 struct compat_ebt_replace tmp;
2340 struct ebt_table *t;
2341
2342 if (!capable(CAP_NET_ADMIN))
2343 return -EPERM;
2344
2345 /* try real handler in case userland supplied needed padding */
2346 if ((cmd == EBT_SO_GET_INFO ||
2347 cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
2348 return do_ebt_get_ctl(sk, cmd, user, len);
2349
2350 if (copy_from_user(&tmp, user, sizeof(tmp)))
2351 return -EFAULT;
2352
2353 t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
2354 if (!t)
2355 return ret;
2356
2357 xt_compat_lock(NFPROTO_BRIDGE);
2358 switch (cmd) {
2359 case EBT_SO_GET_INFO:
2360 tmp.nentries = t->private->nentries;
2361 ret = compat_table_info(t->private, &tmp);
2362 if (ret)
2363 goto out;
2364 tmp.valid_hooks = t->valid_hooks;
2365
2366 if (copy_to_user(user, &tmp, *len) != 0) {
2367 ret = -EFAULT;
2368 break;
2369 }
2370 ret = 0;
2371 break;
2372 case EBT_SO_GET_INIT_INFO:
2373 tmp.nentries = t->table->nentries;
2374 tmp.entries_size = t->table->entries_size;
2375 tmp.valid_hooks = t->table->valid_hooks;
2376
2377 if (copy_to_user(user, &tmp, *len) != 0) {
2378 ret = -EFAULT;
2379 break;
2380 }
2381 ret = 0;
2382 break;
2383 case EBT_SO_GET_ENTRIES:
2384 case EBT_SO_GET_INIT_ENTRIES:
2385 /*
2386 * try real handler first in case of userland-side padding.
2387 * in case we are dealing with an 'ordinary' 32 bit binary
2388 * without 64bit compatibility padding, this will fail right
2389 * after copy_from_user when the *len argument is validated.
2390 *
2391 * the compat_ variant needs to do one pass over the kernel
2392 * data set to adjust for size differences before it the check.
2393 */
2394 if (copy_everything_to_user(t, user, len, cmd) == 0)
2395 ret = 0;
2396 else
2397 ret = compat_copy_everything_to_user(t, user, len, cmd);
2398 break;
2399 default:
2400 ret = -EINVAL;
2401 }
2402 out:
2403 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2404 xt_compat_unlock(NFPROTO_BRIDGE);
2405 mutex_unlock(&ebt_mutex);
2406 return ret;
2407}
2408#endif
2409
1481static struct nf_sockopt_ops ebt_sockopts = 2410static struct nf_sockopt_ops ebt_sockopts =
1482{ 2411{
1483 .pf = PF_INET, 2412 .pf = PF_INET,
1484 .set_optmin = EBT_BASE_CTL, 2413 .set_optmin = EBT_BASE_CTL,
1485 .set_optmax = EBT_SO_SET_MAX + 1, 2414 .set_optmax = EBT_SO_SET_MAX + 1,
1486 .set = do_ebt_set_ctl, 2415 .set = do_ebt_set_ctl,
2416#ifdef CONFIG_COMPAT
2417 .compat_set = compat_do_ebt_set_ctl,
2418#endif
1487 .get_optmin = EBT_BASE_CTL, 2419 .get_optmin = EBT_BASE_CTL,
1488 .get_optmax = EBT_SO_GET_MAX + 1, 2420 .get_optmax = EBT_SO_GET_MAX + 1,
1489 .get = do_ebt_get_ctl, 2421 .get = do_ebt_get_ctl,
2422#ifdef CONFIG_COMPAT
2423 .compat_get = compat_do_ebt_get_ctl,
2424#endif
1490 .owner = THIS_MODULE, 2425 .owner = THIS_MODULE,
1491}; 2426};
1492 2427
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 51adc4c2b86..702be5a2c95 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -77,8 +77,8 @@ static int stats_timer __read_mostly = 1;
77module_param(stats_timer, int, S_IRUGO); 77module_param(stats_timer, int, S_IRUGO);
78MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)"); 78MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
79 79
80HLIST_HEAD(can_rx_dev_list); 80/* receive filters subscribed for 'all' CAN devices */
81static struct dev_rcv_lists can_rx_alldev_list; 81struct dev_rcv_lists can_rx_alldev_list;
82static DEFINE_SPINLOCK(can_rcvlists_lock); 82static DEFINE_SPINLOCK(can_rcvlists_lock);
83 83
84static struct kmem_cache *rcv_cache __read_mostly; 84static struct kmem_cache *rcv_cache __read_mostly;
@@ -292,28 +292,10 @@ EXPORT_SYMBOL(can_send);
292 292
293static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev) 293static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
294{ 294{
295 struct dev_rcv_lists *d = NULL; 295 if (!dev)
296 struct hlist_node *n; 296 return &can_rx_alldev_list;
297 297 else
298 /* 298 return (struct dev_rcv_lists *)dev->ml_priv;
299 * find receive list for this device
300 *
301 * The hlist_for_each_entry*() macros curse through the list
302 * using the pointer variable n and set d to the containing
303 * struct in each list iteration. Therefore, after list
304 * iteration, d is unmodified when the list is empty, and it
305 * points to last list element, when the list is non-empty
306 * but no match in the loop body is found. I.e. d is *not*
307 * NULL when no match is found. We can, however, use the
308 * cursor variable n to decide if a match was found.
309 */
310
311 hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
312 if (d->dev == dev)
313 break;
314 }
315
316 return n ? d : NULL;
317} 299}
318 300
319/** 301/**
@@ -433,6 +415,9 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
433 415
434 /* insert new receiver (dev,canid,mask) -> (func,data) */ 416 /* insert new receiver (dev,canid,mask) -> (func,data) */
435 417
418 if (dev && dev->type != ARPHRD_CAN)
419 return -ENODEV;
420
436 r = kmem_cache_alloc(rcv_cache, GFP_KERNEL); 421 r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
437 if (!r) 422 if (!r)
438 return -ENOMEM; 423 return -ENOMEM;
@@ -468,16 +453,6 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
468EXPORT_SYMBOL(can_rx_register); 453EXPORT_SYMBOL(can_rx_register);
469 454
470/* 455/*
471 * can_rx_delete_device - rcu callback for dev_rcv_lists structure removal
472 */
473static void can_rx_delete_device(struct rcu_head *rp)
474{
475 struct dev_rcv_lists *d = container_of(rp, struct dev_rcv_lists, rcu);
476
477 kfree(d);
478}
479
480/*
481 * can_rx_delete_receiver - rcu callback for single receiver entry removal 456 * can_rx_delete_receiver - rcu callback for single receiver entry removal
482 */ 457 */
483static void can_rx_delete_receiver(struct rcu_head *rp) 458static void can_rx_delete_receiver(struct rcu_head *rp)
@@ -506,6 +481,9 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
506 struct hlist_node *next; 481 struct hlist_node *next;
507 struct dev_rcv_lists *d; 482 struct dev_rcv_lists *d;
508 483
484 if (dev && dev->type != ARPHRD_CAN)
485 return;
486
509 spin_lock(&can_rcvlists_lock); 487 spin_lock(&can_rcvlists_lock);
510 488
511 d = find_dev_rcv_lists(dev); 489 d = find_dev_rcv_lists(dev);
@@ -541,7 +519,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
541 "dev %s, id %03X, mask %03X\n", 519 "dev %s, id %03X, mask %03X\n",
542 DNAME(dev), can_id, mask); 520 DNAME(dev), can_id, mask);
543 r = NULL; 521 r = NULL;
544 d = NULL;
545 goto out; 522 goto out;
546 } 523 }
547 524
@@ -552,10 +529,10 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
552 can_pstats.rcv_entries--; 529 can_pstats.rcv_entries--;
553 530
554 /* remove device structure requested by NETDEV_UNREGISTER */ 531 /* remove device structure requested by NETDEV_UNREGISTER */
555 if (d->remove_on_zero_entries && !d->entries) 532 if (d->remove_on_zero_entries && !d->entries) {
556 hlist_del_rcu(&d->list); 533 kfree(d);
557 else 534 dev->ml_priv = NULL;
558 d = NULL; 535 }
559 536
560 out: 537 out:
561 spin_unlock(&can_rcvlists_lock); 538 spin_unlock(&can_rcvlists_lock);
@@ -563,10 +540,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
563 /* schedule the receiver item for deletion */ 540 /* schedule the receiver item for deletion */
564 if (r) 541 if (r)
565 call_rcu(&r->rcu, can_rx_delete_receiver); 542 call_rcu(&r->rcu, can_rx_delete_receiver);
566
567 /* schedule the device structure for deletion */
568 if (d)
569 call_rcu(&d->rcu, can_rx_delete_device);
570} 543}
571EXPORT_SYMBOL(can_rx_unregister); 544EXPORT_SYMBOL(can_rx_unregister);
572 545
@@ -780,48 +753,35 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
780 753
781 case NETDEV_REGISTER: 754 case NETDEV_REGISTER:
782 755
783 /* 756 /* create new dev_rcv_lists for this device */
784 * create new dev_rcv_lists for this device
785 *
786 * N.B. zeroing the struct is the correct initialization
787 * for the embedded hlist_head structs.
788 * Another list type, e.g. list_head, would require
789 * explicit initialization.
790 */
791
792 d = kzalloc(sizeof(*d), GFP_KERNEL); 757 d = kzalloc(sizeof(*d), GFP_KERNEL);
793 if (!d) { 758 if (!d) {
794 printk(KERN_ERR 759 printk(KERN_ERR
795 "can: allocation of receive list failed\n"); 760 "can: allocation of receive list failed\n");
796 return NOTIFY_DONE; 761 return NOTIFY_DONE;
797 } 762 }
798 d->dev = dev; 763 BUG_ON(dev->ml_priv);
799 764 dev->ml_priv = d;
800 spin_lock(&can_rcvlists_lock);
801 hlist_add_head_rcu(&d->list, &can_rx_dev_list);
802 spin_unlock(&can_rcvlists_lock);
803 765
804 break; 766 break;
805 767
806 case NETDEV_UNREGISTER: 768 case NETDEV_UNREGISTER:
807 spin_lock(&can_rcvlists_lock); 769 spin_lock(&can_rcvlists_lock);
808 770
809 d = find_dev_rcv_lists(dev); 771 d = dev->ml_priv;
810 if (d) { 772 if (d) {
811 if (d->entries) { 773 if (d->entries)
812 d->remove_on_zero_entries = 1; 774 d->remove_on_zero_entries = 1;
813 d = NULL; 775 else {
814 } else 776 kfree(d);
815 hlist_del_rcu(&d->list); 777 dev->ml_priv = NULL;
778 }
816 } else 779 } else
817 printk(KERN_ERR "can: notifier: receive list not " 780 printk(KERN_ERR "can: notifier: receive list not "
818 "found for dev %s\n", dev->name); 781 "found for dev %s\n", dev->name);
819 782
820 spin_unlock(&can_rcvlists_lock); 783 spin_unlock(&can_rcvlists_lock);
821 784
822 if (d)
823 call_rcu(&d->rcu, can_rx_delete_device);
824
825 break; 785 break;
826 } 786 }
827 787
@@ -853,21 +813,13 @@ static __init int can_init(void)
853{ 813{
854 printk(banner); 814 printk(banner);
855 815
816 memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list));
817
856 rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver), 818 rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
857 0, 0, NULL); 819 0, 0, NULL);
858 if (!rcv_cache) 820 if (!rcv_cache)
859 return -ENOMEM; 821 return -ENOMEM;
860 822
861 /*
862 * Insert can_rx_alldev_list for reception on all devices.
863 * This struct is zero initialized which is correct for the
864 * embedded hlist heads, the dev pointer, and the entries counter.
865 */
866
867 spin_lock(&can_rcvlists_lock);
868 hlist_add_head_rcu(&can_rx_alldev_list.list, &can_rx_dev_list);
869 spin_unlock(&can_rcvlists_lock);
870
871 if (stats_timer) { 823 if (stats_timer) {
872 /* the statistics are updated every second (timer triggered) */ 824 /* the statistics are updated every second (timer triggered) */
873 setup_timer(&can_stattimer, can_stat_update, 0); 825 setup_timer(&can_stattimer, can_stat_update, 0);
@@ -887,8 +839,7 @@ static __init int can_init(void)
887 839
888static __exit void can_exit(void) 840static __exit void can_exit(void)
889{ 841{
890 struct dev_rcv_lists *d; 842 struct net_device *dev;
891 struct hlist_node *n, *next;
892 843
893 if (stats_timer) 844 if (stats_timer)
894 del_timer(&can_stattimer); 845 del_timer(&can_stattimer);
@@ -900,14 +851,19 @@ static __exit void can_exit(void)
900 unregister_netdevice_notifier(&can_netdev_notifier); 851 unregister_netdevice_notifier(&can_netdev_notifier);
901 sock_unregister(PF_CAN); 852 sock_unregister(PF_CAN);
902 853
903 /* remove can_rx_dev_list */ 854 /* remove created dev_rcv_lists from still registered CAN devices */
904 spin_lock(&can_rcvlists_lock); 855 rcu_read_lock();
905 hlist_del(&can_rx_alldev_list.list); 856 for_each_netdev_rcu(&init_net, dev) {
906 hlist_for_each_entry_safe(d, n, next, &can_rx_dev_list, list) { 857 if (dev->type == ARPHRD_CAN && dev->ml_priv){
907 hlist_del(&d->list); 858
908 kfree(d); 859 struct dev_rcv_lists *d = dev->ml_priv;
860
861 BUG_ON(d->entries);
862 kfree(d);
863 dev->ml_priv = NULL;
864 }
909 } 865 }
910 spin_unlock(&can_rcvlists_lock); 866 rcu_read_unlock();
911 867
912 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 868 rcu_barrier(); /* Wait for completion of call_rcu()'s */
913 869
diff --git a/net/can/af_can.h b/net/can/af_can.h
index 18f91e37cc3..34253b84e30 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -63,10 +63,8 @@ struct receiver {
63 63
64enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_EFF, RX_MAX }; 64enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_EFF, RX_MAX };
65 65
66/* per device receive filters linked at dev->ml_priv */
66struct dev_rcv_lists { 67struct dev_rcv_lists {
67 struct hlist_node list;
68 struct rcu_head rcu;
69 struct net_device *dev;
70 struct hlist_head rx[RX_MAX]; 68 struct hlist_head rx[RX_MAX];
71 struct hlist_head rx_sff[0x800]; 69 struct hlist_head rx_sff[0x800];
72 int remove_on_zero_entries; 70 int remove_on_zero_entries;
diff --git a/net/can/proc.c b/net/can/proc.c
index 9b9ad29be56..f4265cc9c3f 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -45,6 +45,7 @@
45#include <linux/proc_fs.h> 45#include <linux/proc_fs.h>
46#include <linux/list.h> 46#include <linux/list.h>
47#include <linux/rcupdate.h> 47#include <linux/rcupdate.h>
48#include <linux/if_arp.h>
48#include <linux/can/core.h> 49#include <linux/can/core.h>
49 50
50#include "af_can.h" 51#include "af_can.h"
@@ -84,6 +85,9 @@ static const char rx_list_name[][8] = {
84 [RX_EFF] = "rx_eff", 85 [RX_EFF] = "rx_eff",
85}; 86};
86 87
88/* receive filters subscribed for 'all' CAN devices */
89extern struct dev_rcv_lists can_rx_alldev_list;
90
87/* 91/*
88 * af_can statistics stuff 92 * af_can statistics stuff
89 */ 93 */
@@ -190,10 +194,6 @@ void can_stat_update(unsigned long data)
190 194
191/* 195/*
192 * proc read functions 196 * proc read functions
193 *
194 * From known use-cases we expect about 10 entries in a receive list to be
195 * printed in the proc_fs. So PAGE_SIZE is definitely enough space here.
196 *
197 */ 197 */
198 198
199static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list, 199static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
@@ -202,7 +202,6 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
202 struct receiver *r; 202 struct receiver *r;
203 struct hlist_node *n; 203 struct hlist_node *n;
204 204
205 rcu_read_lock();
206 hlist_for_each_entry_rcu(r, n, rx_list, list) { 205 hlist_for_each_entry_rcu(r, n, rx_list, list) {
207 char *fmt = (r->can_id & CAN_EFF_FLAG)? 206 char *fmt = (r->can_id & CAN_EFF_FLAG)?
208 " %-5s %08X %08x %08x %08x %8ld %s\n" : 207 " %-5s %08X %08x %08x %08x %8ld %s\n" :
@@ -212,7 +211,6 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
212 (unsigned long)r->func, (unsigned long)r->data, 211 (unsigned long)r->func, (unsigned long)r->data,
213 r->matches, r->ident); 212 r->matches, r->ident);
214 } 213 }
215 rcu_read_unlock();
216} 214}
217 215
218static void can_print_recv_banner(struct seq_file *m) 216static void can_print_recv_banner(struct seq_file *m)
@@ -346,24 +344,39 @@ static const struct file_operations can_version_proc_fops = {
346 .release = single_release, 344 .release = single_release,
347}; 345};
348 346
347static inline void can_rcvlist_proc_show_one(struct seq_file *m, int idx,
348 struct net_device *dev,
349 struct dev_rcv_lists *d)
350{
351 if (!hlist_empty(&d->rx[idx])) {
352 can_print_recv_banner(m);
353 can_print_rcvlist(m, &d->rx[idx], dev);
354 } else
355 seq_printf(m, " (%s: no entry)\n", DNAME(dev));
356
357}
358
349static int can_rcvlist_proc_show(struct seq_file *m, void *v) 359static int can_rcvlist_proc_show(struct seq_file *m, void *v)
350{ 360{
351 /* double cast to prevent GCC warning */ 361 /* double cast to prevent GCC warning */
352 int idx = (int)(long)m->private; 362 int idx = (int)(long)m->private;
363 struct net_device *dev;
353 struct dev_rcv_lists *d; 364 struct dev_rcv_lists *d;
354 struct hlist_node *n;
355 365
356 seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]); 366 seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]);
357 367
358 rcu_read_lock(); 368 rcu_read_lock();
359 hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
360 369
361 if (!hlist_empty(&d->rx[idx])) { 370 /* receive list for 'all' CAN devices (dev == NULL) */
362 can_print_recv_banner(m); 371 d = &can_rx_alldev_list;
363 can_print_rcvlist(m, &d->rx[idx], d->dev); 372 can_rcvlist_proc_show_one(m, idx, NULL, d);
364 } else 373
365 seq_printf(m, " (%s: no entry)\n", DNAME(d->dev)); 374 /* receive list for registered CAN devices */
375 for_each_netdev_rcu(&init_net, dev) {
376 if (dev->type == ARPHRD_CAN && dev->ml_priv)
377 can_rcvlist_proc_show_one(m, idx, dev, dev->ml_priv);
366 } 378 }
379
367 rcu_read_unlock(); 380 rcu_read_unlock();
368 381
369 seq_putc(m, '\n'); 382 seq_putc(m, '\n');
@@ -383,34 +396,50 @@ static const struct file_operations can_rcvlist_proc_fops = {
383 .release = single_release, 396 .release = single_release,
384}; 397};
385 398
399static inline void can_rcvlist_sff_proc_show_one(struct seq_file *m,
400 struct net_device *dev,
401 struct dev_rcv_lists *d)
402{
403 int i;
404 int all_empty = 1;
405
406 /* check wether at least one list is non-empty */
407 for (i = 0; i < 0x800; i++)
408 if (!hlist_empty(&d->rx_sff[i])) {
409 all_empty = 0;
410 break;
411 }
412
413 if (!all_empty) {
414 can_print_recv_banner(m);
415 for (i = 0; i < 0x800; i++) {
416 if (!hlist_empty(&d->rx_sff[i]))
417 can_print_rcvlist(m, &d->rx_sff[i], dev);
418 }
419 } else
420 seq_printf(m, " (%s: no entry)\n", DNAME(dev));
421}
422
386static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v) 423static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
387{ 424{
425 struct net_device *dev;
388 struct dev_rcv_lists *d; 426 struct dev_rcv_lists *d;
389 struct hlist_node *n;
390 427
391 /* RX_SFF */ 428 /* RX_SFF */
392 seq_puts(m, "\nreceive list 'rx_sff':\n"); 429 seq_puts(m, "\nreceive list 'rx_sff':\n");
393 430
394 rcu_read_lock(); 431 rcu_read_lock();
395 hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) { 432
396 int i, all_empty = 1; 433 /* sff receive list for 'all' CAN devices (dev == NULL) */
397 /* check wether at least one list is non-empty */ 434 d = &can_rx_alldev_list;
398 for (i = 0; i < 0x800; i++) 435 can_rcvlist_sff_proc_show_one(m, NULL, d);
399 if (!hlist_empty(&d->rx_sff[i])) { 436
400 all_empty = 0; 437 /* sff receive list for registered CAN devices */
401 break; 438 for_each_netdev_rcu(&init_net, dev) {
402 } 439 if (dev->type == ARPHRD_CAN && dev->ml_priv)
403 440 can_rcvlist_sff_proc_show_one(m, dev, dev->ml_priv);
404 if (!all_empty) {
405 can_print_recv_banner(m);
406 for (i = 0; i < 0x800; i++) {
407 if (!hlist_empty(&d->rx_sff[i]))
408 can_print_rcvlist(m, &d->rx_sff[i],
409 d->dev);
410 }
411 } else
412 seq_printf(m, " (%s: no entry)\n", DNAME(d->dev));
413 } 441 }
442
414 rcu_read_unlock(); 443 rcu_read_unlock();
415 444
416 seq_putc(m, '\n'); 445 seq_putc(m, '\n');
diff --git a/net/core/dev.c b/net/core/dev.c
index ec874218b20..1968980f513 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1448,13 +1448,10 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1448 if (skb->len > (dev->mtu + dev->hard_header_len)) 1448 if (skb->len > (dev->mtu + dev->hard_header_len))
1449 return NET_RX_DROP; 1449 return NET_RX_DROP;
1450 1450
1451 skb_dst_drop(skb); 1451 skb_set_dev(skb, dev);
1452 skb->tstamp.tv64 = 0; 1452 skb->tstamp.tv64 = 0;
1453 skb->pkt_type = PACKET_HOST; 1453 skb->pkt_type = PACKET_HOST;
1454 skb->protocol = eth_type_trans(skb, dev); 1454 skb->protocol = eth_type_trans(skb, dev);
1455 skb->mark = 0;
1456 secpath_reset(skb);
1457 nf_reset(skb);
1458 return netif_rx(skb); 1455 return netif_rx(skb);
1459} 1456}
1460EXPORT_SYMBOL_GPL(dev_forward_skb); 1457EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -1614,6 +1611,36 @@ static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1614 return false; 1611 return false;
1615} 1612}
1616 1613
1614/**
1615 * skb_dev_set -- assign a new device to a buffer
1616 * @skb: buffer for the new device
1617 * @dev: network device
1618 *
1619 * If an skb is owned by a device already, we have to reset
1620 * all data private to the namespace a device belongs to
1621 * before assigning it a new device.
1622 */
1623#ifdef CONFIG_NET_NS
1624void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1625{
1626 skb_dst_drop(skb);
1627 if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
1628 secpath_reset(skb);
1629 nf_reset(skb);
1630 skb_init_secmark(skb);
1631 skb->mark = 0;
1632 skb->priority = 0;
1633 skb->nf_trace = 0;
1634 skb->ipvs_property = 0;
1635#ifdef CONFIG_NET_SCHED
1636 skb->tc_index = 0;
1637#endif
1638 }
1639 skb->dev = dev;
1640}
1641EXPORT_SYMBOL(skb_set_dev);
1642#endif /* CONFIG_NET_NS */
1643
1617/* 1644/*
1618 * Invalidate hardware checksum when packet is to be mangled, and 1645 * Invalidate hardware checksum when packet is to be mangled, and
1619 * complete checksum manually on outgoing path. 1646 * complete checksum manually on outgoing path.
@@ -1853,6 +1880,14 @@ gso:
1853 1880
1854 skb->next = nskb->next; 1881 skb->next = nskb->next;
1855 nskb->next = NULL; 1882 nskb->next = NULL;
1883
1884 /*
1885 * If device doesnt need nskb->dst, release it right now while
1886 * its hot in this cpu cache
1887 */
1888 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1889 skb_dst_drop(nskb);
1890
1856 rc = ops->ndo_start_xmit(nskb, dev); 1891 rc = ops->ndo_start_xmit(nskb, dev);
1857 if (unlikely(rc != NETDEV_TX_OK)) { 1892 if (unlikely(rc != NETDEV_TX_OK)) {
1858 if (rc & ~NETDEV_TX_MASK) 1893 if (rc & ~NETDEV_TX_MASK)
@@ -1974,6 +2009,21 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
1974 return rc; 2009 return rc;
1975} 2010}
1976 2011
2012/*
2013 * Returns true if either:
2014 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2015 * 2. skb is fragmented and the device does not support SG, or if
2016 * at least one of fragments is in highmem and device does not
2017 * support DMA from it.
2018 */
2019static inline int skb_needs_linearize(struct sk_buff *skb,
2020 struct net_device *dev)
2021{
2022 return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
2023 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
2024 illegal_highdma(dev, skb)));
2025}
2026
1977/** 2027/**
1978 * dev_queue_xmit - transmit a buffer 2028 * dev_queue_xmit - transmit a buffer
1979 * @skb: buffer to transmit 2029 * @skb: buffer to transmit
@@ -2010,18 +2060,8 @@ int dev_queue_xmit(struct sk_buff *skb)
2010 if (netif_needs_gso(dev, skb)) 2060 if (netif_needs_gso(dev, skb))
2011 goto gso; 2061 goto gso;
2012 2062
2013 if (skb_has_frags(skb) && 2063 /* Convert a paged skb to linear, if required */
2014 !(dev->features & NETIF_F_FRAGLIST) && 2064 if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
2015 __skb_linearize(skb))
2016 goto out_kfree_skb;
2017
2018 /* Fragmented skb is linearized if device does not support SG,
2019 * or if at least one of fragments is in highmem and device
2020 * does not support DMA from it.
2021 */
2022 if (skb_shinfo(skb)->nr_frags &&
2023 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
2024 __skb_linearize(skb))
2025 goto out_kfree_skb; 2065 goto out_kfree_skb;
2026 2066
2027 /* If packet is not checksummed and device does not support 2067 /* If packet is not checksummed and device does not support
@@ -2422,6 +2462,7 @@ int netif_receive_skb(struct sk_buff *skb)
2422 struct packet_type *ptype, *pt_prev; 2462 struct packet_type *ptype, *pt_prev;
2423 struct net_device *orig_dev; 2463 struct net_device *orig_dev;
2424 struct net_device *null_or_orig; 2464 struct net_device *null_or_orig;
2465 struct net_device *null_or_bond;
2425 int ret = NET_RX_DROP; 2466 int ret = NET_RX_DROP;
2426 __be16 type; 2467 __be16 type;
2427 2468
@@ -2487,12 +2528,24 @@ ncls:
2487 if (!skb) 2528 if (!skb)
2488 goto out; 2529 goto out;
2489 2530
2531 /*
2532 * Make sure frames received on VLAN interfaces stacked on
2533 * bonding interfaces still make their way to any base bonding
2534 * device that may have registered for a specific ptype. The
2535 * handler may have to adjust skb->dev and orig_dev.
2536 */
2537 null_or_bond = NULL;
2538 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
2539 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
2540 null_or_bond = vlan_dev_real_dev(skb->dev);
2541 }
2542
2490 type = skb->protocol; 2543 type = skb->protocol;
2491 list_for_each_entry_rcu(ptype, 2544 list_for_each_entry_rcu(ptype,
2492 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { 2545 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2493 if (ptype->type == type && 2546 if (ptype->type == type && (ptype->dev == null_or_orig ||
2494 (ptype->dev == null_or_orig || ptype->dev == skb->dev || 2547 ptype->dev == skb->dev || ptype->dev == orig_dev ||
2495 ptype->dev == orig_dev)) { 2548 ptype->dev == null_or_bond)) {
2496 if (pt_prev) 2549 if (pt_prev)
2497 ret = deliver_skb(skb, pt_prev, orig_dev); 2550 ret = deliver_skb(skb, pt_prev, orig_dev);
2498 pt_prev = ptype; 2551 pt_prev = ptype;
@@ -2561,7 +2614,7 @@ out:
2561 return netif_receive_skb(skb); 2614 return netif_receive_skb(skb);
2562} 2615}
2563 2616
2564void napi_gro_flush(struct napi_struct *napi) 2617static void napi_gro_flush(struct napi_struct *napi)
2565{ 2618{
2566 struct sk_buff *skb, *next; 2619 struct sk_buff *skb, *next;
2567 2620
@@ -2574,7 +2627,6 @@ void napi_gro_flush(struct napi_struct *napi)
2574 napi->gro_count = 0; 2627 napi->gro_count = 0;
2575 napi->gro_list = NULL; 2628 napi->gro_list = NULL;
2576} 2629}
2577EXPORT_SYMBOL(napi_gro_flush);
2578 2630
2579enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 2631enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2580{ 2632{
@@ -3185,7 +3237,7 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3185{ 3237{
3186 const struct net_device_stats *stats = dev_get_stats(dev); 3238 const struct net_device_stats *stats = dev_get_stats(dev);
3187 3239
3188 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu " 3240 seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3189 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n", 3241 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3190 dev->name, stats->rx_bytes, stats->rx_packets, 3242 dev->name, stats->rx_bytes, stats->rx_packets,
3191 stats->rx_errors, 3243 stats->rx_errors,
@@ -3640,10 +3692,10 @@ void __dev_set_rx_mode(struct net_device *dev)
3640 /* Unicast addresses changes may only happen under the rtnl, 3692 /* Unicast addresses changes may only happen under the rtnl,
3641 * therefore calling __dev_set_promiscuity here is safe. 3693 * therefore calling __dev_set_promiscuity here is safe.
3642 */ 3694 */
3643 if (dev->uc.count > 0 && !dev->uc_promisc) { 3695 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
3644 __dev_set_promiscuity(dev, 1); 3696 __dev_set_promiscuity(dev, 1);
3645 dev->uc_promisc = 1; 3697 dev->uc_promisc = 1;
3646 } else if (dev->uc.count == 0 && dev->uc_promisc) { 3698 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
3647 __dev_set_promiscuity(dev, -1); 3699 __dev_set_promiscuity(dev, -1);
3648 dev->uc_promisc = 0; 3700 dev->uc_promisc = 0;
3649 } 3701 }
@@ -4211,7 +4263,7 @@ static void dev_addr_discard(struct net_device *dev)
4211 netif_addr_lock_bh(dev); 4263 netif_addr_lock_bh(dev);
4212 4264
4213 __dev_addr_discard(&dev->mc_list); 4265 __dev_addr_discard(&dev->mc_list);
4214 dev->mc_count = 0; 4266 netdev_mc_count(dev) = 0;
4215 4267
4216 netif_addr_unlock_bh(dev); 4268 netif_addr_unlock_bh(dev);
4217} 4269}
@@ -5367,6 +5419,8 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5367 5419
5368 netdev_init_queues(dev); 5420 netdev_init_queues(dev);
5369 5421
5422 INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
5423 dev->ethtool_ntuple_list.count = 0;
5370 INIT_LIST_HEAD(&dev->napi_list); 5424 INIT_LIST_HEAD(&dev->napi_list);
5371 INIT_LIST_HEAD(&dev->unreg_list); 5425 INIT_LIST_HEAD(&dev->unreg_list);
5372 INIT_LIST_HEAD(&dev->link_watch_list); 5426 INIT_LIST_HEAD(&dev->link_watch_list);
@@ -5403,6 +5457,9 @@ void free_netdev(struct net_device *dev)
5403 /* Flush device addresses */ 5457 /* Flush device addresses */
5404 dev_addr_flush(dev); 5458 dev_addr_flush(dev);
5405 5459
5460 /* Clear ethtool n-tuple list */
5461 ethtool_ntuple_flush(dev);
5462
5406 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) 5463 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5407 netif_napi_del(p); 5464 netif_napi_del(p);
5408 5465
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index b8e9d3a8688..f8c87497535 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -296,7 +296,6 @@ static int dropmon_net_event(struct notifier_block *ev_block,
296 296
297 new_stat->dev = dev; 297 new_stat->dev = dev;
298 new_stat->last_rx = jiffies; 298 new_stat->last_rx = jiffies;
299 INIT_RCU_HEAD(&new_stat->rcu);
300 spin_lock(&trace_state_lock); 299 spin_lock(&trace_state_lock);
301 list_add_rcu(&new_stat->list, &hw_stats_list); 300 list_add_rcu(&new_stat->list, &hw_stats_list);
302 spin_unlock(&trace_state_lock); 301 spin_unlock(&trace_state_lock);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 236a9988ea9..31b1eddc1b8 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -120,7 +120,7 @@ int ethtool_op_set_ufo(struct net_device *dev, u32 data)
120 * NETIF_F_xxx values in include/linux/netdevice.h 120 * NETIF_F_xxx values in include/linux/netdevice.h
121 */ 121 */
122static const u32 flags_dup_features = 122static const u32 flags_dup_features =
123 ETH_FLAG_LRO; 123 (ETH_FLAG_LRO | ETH_FLAG_NTUPLE);
124 124
125u32 ethtool_op_get_flags(struct net_device *dev) 125u32 ethtool_op_get_flags(struct net_device *dev)
126{ 126{
@@ -134,19 +134,42 @@ u32 ethtool_op_get_flags(struct net_device *dev)
134 134
135int ethtool_op_set_flags(struct net_device *dev, u32 data) 135int ethtool_op_set_flags(struct net_device *dev, u32 data)
136{ 136{
137 const struct ethtool_ops *ops = dev->ethtool_ops;
138
137 if (data & ETH_FLAG_LRO) 139 if (data & ETH_FLAG_LRO)
138 dev->features |= NETIF_F_LRO; 140 dev->features |= NETIF_F_LRO;
139 else 141 else
140 dev->features &= ~NETIF_F_LRO; 142 dev->features &= ~NETIF_F_LRO;
141 143
144 if (data & ETH_FLAG_NTUPLE) {
145 if (!ops->set_rx_ntuple)
146 return -EOPNOTSUPP;
147 dev->features |= NETIF_F_NTUPLE;
148 } else {
149 /* safe to clear regardless */
150 dev->features &= ~NETIF_F_NTUPLE;
151 }
152
142 return 0; 153 return 0;
143} 154}
144 155
156void ethtool_ntuple_flush(struct net_device *dev)
157{
158 struct ethtool_rx_ntuple_flow_spec_container *fsc, *f;
159
160 list_for_each_entry_safe(fsc, f, &dev->ethtool_ntuple_list.list, list) {
161 list_del(&fsc->list);
162 kfree(fsc);
163 }
164 dev->ethtool_ntuple_list.count = 0;
165}
166EXPORT_SYMBOL(ethtool_ntuple_flush);
167
145/* Handlers for each ethtool command */ 168/* Handlers for each ethtool command */
146 169
147static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) 170static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
148{ 171{
149 struct ethtool_cmd cmd = { ETHTOOL_GSET }; 172 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
150 int err; 173 int err;
151 174
152 if (!dev->ethtool_ops->get_settings) 175 if (!dev->ethtool_ops->get_settings)
@@ -174,7 +197,10 @@ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
174 return dev->ethtool_ops->set_settings(dev, &cmd); 197 return dev->ethtool_ops->set_settings(dev, &cmd);
175} 198}
176 199
177static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr) 200/*
201 * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
202 */
203static noinline int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
178{ 204{
179 struct ethtool_drvinfo info; 205 struct ethtool_drvinfo info;
180 const struct ethtool_ops *ops = dev->ethtool_ops; 206 const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -209,7 +235,10 @@ static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
209 return 0; 235 return 0;
210} 236}
211 237
212static int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr) 238/*
239 * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
240 */
241static noinline int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
213{ 242{
214 struct ethtool_rxnfc cmd; 243 struct ethtool_rxnfc cmd;
215 244
@@ -222,7 +251,10 @@ static int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
222 return dev->ethtool_ops->set_rxnfc(dev, &cmd); 251 return dev->ethtool_ops->set_rxnfc(dev, &cmd);
223} 252}
224 253
225static int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr) 254/*
255 * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
256 */
257static noinline int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
226{ 258{
227 struct ethtool_rxnfc info; 259 struct ethtool_rxnfc info;
228 const struct ethtool_ops *ops = dev->ethtool_ops; 260 const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -266,6 +298,315 @@ err_out:
266 return ret; 298 return ret;
267} 299}
268 300
301static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
302 struct ethtool_rx_ntuple_flow_spec *spec,
303 struct ethtool_rx_ntuple_flow_spec_container *fsc)
304{
305
306 /* don't add filters forever */
307 if (list->count >= ETHTOOL_MAX_NTUPLE_LIST_ENTRY) {
308 /* free the container */
309 kfree(fsc);
310 return;
311 }
312
313 /* Copy the whole filter over */
314 fsc->fs.flow_type = spec->flow_type;
315 memcpy(&fsc->fs.h_u, &spec->h_u, sizeof(spec->h_u));
316 memcpy(&fsc->fs.m_u, &spec->m_u, sizeof(spec->m_u));
317
318 fsc->fs.vlan_tag = spec->vlan_tag;
319 fsc->fs.vlan_tag_mask = spec->vlan_tag_mask;
320 fsc->fs.data = spec->data;
321 fsc->fs.data_mask = spec->data_mask;
322 fsc->fs.action = spec->action;
323
324 /* add to the list */
325 list_add_tail_rcu(&fsc->list, &list->list);
326 list->count++;
327}
328
329/*
330 * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
331 */
332static noinline int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr)
333{
334 struct ethtool_rx_ntuple cmd;
335 const struct ethtool_ops *ops = dev->ethtool_ops;
336 struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL;
337 int ret;
338
339 if (!(dev->features & NETIF_F_NTUPLE))
340 return -EINVAL;
341
342 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
343 return -EFAULT;
344
345 /*
346 * Cache filter in dev struct for GET operation only if
347 * the underlying driver doesn't have its own GET operation, and
348 * only if the filter was added successfully. First make sure we
349 * can allocate the filter, then continue if successful.
350 */
351 if (!ops->get_rx_ntuple) {
352 fsc = kmalloc(sizeof(*fsc), GFP_ATOMIC);
353 if (!fsc)
354 return -ENOMEM;
355 }
356
357 ret = ops->set_rx_ntuple(dev, &cmd);
358 if (ret) {
359 kfree(fsc);
360 return ret;
361 }
362
363 if (!ops->get_rx_ntuple)
364 __rx_ntuple_filter_add(&dev->ethtool_ntuple_list, &cmd.fs, fsc);
365
366 return ret;
367}
368
369static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
370{
371 struct ethtool_gstrings gstrings;
372 const struct ethtool_ops *ops = dev->ethtool_ops;
373 struct ethtool_rx_ntuple_flow_spec_container *fsc;
374 u8 *data;
375 char *p;
376 int ret, i, num_strings = 0;
377
378 if (!ops->get_sset_count)
379 return -EOPNOTSUPP;
380
381 if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
382 return -EFAULT;
383
384 ret = ops->get_sset_count(dev, gstrings.string_set);
385 if (ret < 0)
386 return ret;
387
388 gstrings.len = ret;
389
390 data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
391 if (!data)
392 return -ENOMEM;
393
394 if (ops->get_rx_ntuple) {
395 /* driver-specific filter grab */
396 ret = ops->get_rx_ntuple(dev, gstrings.string_set, data);
397 goto copy;
398 }
399
400 /* default ethtool filter grab */
401 i = 0;
402 p = (char *)data;
403 list_for_each_entry(fsc, &dev->ethtool_ntuple_list.list, list) {
404 sprintf(p, "Filter %d:\n", i);
405 p += ETH_GSTRING_LEN;
406 num_strings++;
407
408 switch (fsc->fs.flow_type) {
409 case TCP_V4_FLOW:
410 sprintf(p, "\tFlow Type: TCP\n");
411 p += ETH_GSTRING_LEN;
412 num_strings++;
413 break;
414 case UDP_V4_FLOW:
415 sprintf(p, "\tFlow Type: UDP\n");
416 p += ETH_GSTRING_LEN;
417 num_strings++;
418 break;
419 case SCTP_V4_FLOW:
420 sprintf(p, "\tFlow Type: SCTP\n");
421 p += ETH_GSTRING_LEN;
422 num_strings++;
423 break;
424 case AH_ESP_V4_FLOW:
425 sprintf(p, "\tFlow Type: AH ESP\n");
426 p += ETH_GSTRING_LEN;
427 num_strings++;
428 break;
429 case ESP_V4_FLOW:
430 sprintf(p, "\tFlow Type: ESP\n");
431 p += ETH_GSTRING_LEN;
432 num_strings++;
433 break;
434 case IP_USER_FLOW:
435 sprintf(p, "\tFlow Type: Raw IP\n");
436 p += ETH_GSTRING_LEN;
437 num_strings++;
438 break;
439 case IPV4_FLOW:
440 sprintf(p, "\tFlow Type: IPv4\n");
441 p += ETH_GSTRING_LEN;
442 num_strings++;
443 break;
444 default:
445 sprintf(p, "\tFlow Type: Unknown\n");
446 p += ETH_GSTRING_LEN;
447 num_strings++;
448 goto unknown_filter;
449 };
450
451 /* now the rest of the filters */
452 switch (fsc->fs.flow_type) {
453 case TCP_V4_FLOW:
454 case UDP_V4_FLOW:
455 case SCTP_V4_FLOW:
456 sprintf(p, "\tSrc IP addr: 0x%x\n",
457 fsc->fs.h_u.tcp_ip4_spec.ip4src);
458 p += ETH_GSTRING_LEN;
459 num_strings++;
460 sprintf(p, "\tSrc IP mask: 0x%x\n",
461 fsc->fs.m_u.tcp_ip4_spec.ip4src);
462 p += ETH_GSTRING_LEN;
463 num_strings++;
464 sprintf(p, "\tDest IP addr: 0x%x\n",
465 fsc->fs.h_u.tcp_ip4_spec.ip4dst);
466 p += ETH_GSTRING_LEN;
467 num_strings++;
468 sprintf(p, "\tDest IP mask: 0x%x\n",
469 fsc->fs.m_u.tcp_ip4_spec.ip4dst);
470 p += ETH_GSTRING_LEN;
471 num_strings++;
472 sprintf(p, "\tSrc Port: %d, mask: 0x%x\n",
473 fsc->fs.h_u.tcp_ip4_spec.psrc,
474 fsc->fs.m_u.tcp_ip4_spec.psrc);
475 p += ETH_GSTRING_LEN;
476 num_strings++;
477 sprintf(p, "\tDest Port: %d, mask: 0x%x\n",
478 fsc->fs.h_u.tcp_ip4_spec.pdst,
479 fsc->fs.m_u.tcp_ip4_spec.pdst);
480 p += ETH_GSTRING_LEN;
481 num_strings++;
482 sprintf(p, "\tTOS: %d, mask: 0x%x\n",
483 fsc->fs.h_u.tcp_ip4_spec.tos,
484 fsc->fs.m_u.tcp_ip4_spec.tos);
485 p += ETH_GSTRING_LEN;
486 num_strings++;
487 break;
488 case AH_ESP_V4_FLOW:
489 case ESP_V4_FLOW:
490 sprintf(p, "\tSrc IP addr: 0x%x\n",
491 fsc->fs.h_u.ah_ip4_spec.ip4src);
492 p += ETH_GSTRING_LEN;
493 num_strings++;
494 sprintf(p, "\tSrc IP mask: 0x%x\n",
495 fsc->fs.m_u.ah_ip4_spec.ip4src);
496 p += ETH_GSTRING_LEN;
497 num_strings++;
498 sprintf(p, "\tDest IP addr: 0x%x\n",
499 fsc->fs.h_u.ah_ip4_spec.ip4dst);
500 p += ETH_GSTRING_LEN;
501 num_strings++;
502 sprintf(p, "\tDest IP mask: 0x%x\n",
503 fsc->fs.m_u.ah_ip4_spec.ip4dst);
504 p += ETH_GSTRING_LEN;
505 num_strings++;
506 sprintf(p, "\tSPI: %d, mask: 0x%x\n",
507 fsc->fs.h_u.ah_ip4_spec.spi,
508 fsc->fs.m_u.ah_ip4_spec.spi);
509 p += ETH_GSTRING_LEN;
510 num_strings++;
511 sprintf(p, "\tTOS: %d, mask: 0x%x\n",
512 fsc->fs.h_u.ah_ip4_spec.tos,
513 fsc->fs.m_u.ah_ip4_spec.tos);
514 p += ETH_GSTRING_LEN;
515 num_strings++;
516 break;
517 case IP_USER_FLOW:
518 sprintf(p, "\tSrc IP addr: 0x%x\n",
519 fsc->fs.h_u.raw_ip4_spec.ip4src);
520 p += ETH_GSTRING_LEN;
521 num_strings++;
522 sprintf(p, "\tSrc IP mask: 0x%x\n",
523 fsc->fs.m_u.raw_ip4_spec.ip4src);
524 p += ETH_GSTRING_LEN;
525 num_strings++;
526 sprintf(p, "\tDest IP addr: 0x%x\n",
527 fsc->fs.h_u.raw_ip4_spec.ip4dst);
528 p += ETH_GSTRING_LEN;
529 num_strings++;
530 sprintf(p, "\tDest IP mask: 0x%x\n",
531 fsc->fs.m_u.raw_ip4_spec.ip4dst);
532 p += ETH_GSTRING_LEN;
533 num_strings++;
534 break;
535 case IPV4_FLOW:
536 sprintf(p, "\tSrc IP addr: 0x%x\n",
537 fsc->fs.h_u.usr_ip4_spec.ip4src);
538 p += ETH_GSTRING_LEN;
539 num_strings++;
540 sprintf(p, "\tSrc IP mask: 0x%x\n",
541 fsc->fs.m_u.usr_ip4_spec.ip4src);
542 p += ETH_GSTRING_LEN;
543 num_strings++;
544 sprintf(p, "\tDest IP addr: 0x%x\n",
545 fsc->fs.h_u.usr_ip4_spec.ip4dst);
546 p += ETH_GSTRING_LEN;
547 num_strings++;
548 sprintf(p, "\tDest IP mask: 0x%x\n",
549 fsc->fs.m_u.usr_ip4_spec.ip4dst);
550 p += ETH_GSTRING_LEN;
551 num_strings++;
552 sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n",
553 fsc->fs.h_u.usr_ip4_spec.l4_4_bytes,
554 fsc->fs.m_u.usr_ip4_spec.l4_4_bytes);
555 p += ETH_GSTRING_LEN;
556 num_strings++;
557 sprintf(p, "\tTOS: %d, mask: 0x%x\n",
558 fsc->fs.h_u.usr_ip4_spec.tos,
559 fsc->fs.m_u.usr_ip4_spec.tos);
560 p += ETH_GSTRING_LEN;
561 num_strings++;
562 sprintf(p, "\tIP Version: %d, mask: 0x%x\n",
563 fsc->fs.h_u.usr_ip4_spec.ip_ver,
564 fsc->fs.m_u.usr_ip4_spec.ip_ver);
565 p += ETH_GSTRING_LEN;
566 num_strings++;
567 sprintf(p, "\tProtocol: %d, mask: 0x%x\n",
568 fsc->fs.h_u.usr_ip4_spec.proto,
569 fsc->fs.m_u.usr_ip4_spec.proto);
570 p += ETH_GSTRING_LEN;
571 num_strings++;
572 break;
573 };
574 sprintf(p, "\tVLAN: %d, mask: 0x%x\n",
575 fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask);
576 p += ETH_GSTRING_LEN;
577 num_strings++;
578 sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data);
579 p += ETH_GSTRING_LEN;
580 num_strings++;
581 sprintf(p, "\tUser-defined mask: 0x%Lx\n", fsc->fs.data_mask);
582 p += ETH_GSTRING_LEN;
583 num_strings++;
584 if (fsc->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
585 sprintf(p, "\tAction: Drop\n");
586 else
587 sprintf(p, "\tAction: Direct to queue %d\n",
588 fsc->fs.action);
589 p += ETH_GSTRING_LEN;
590 num_strings++;
591unknown_filter:
592 i++;
593 }
594copy:
595 /* indicate to userspace how many strings we actually have */
596 gstrings.len = num_strings;
597 ret = -EFAULT;
598 if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
599 goto out;
600 useraddr += sizeof(gstrings);
601 if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
602 goto out;
603 ret = 0;
604
605out:
606 kfree(data);
607 return ret;
608}
609
269static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) 610static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
270{ 611{
271 struct ethtool_regs regs; 612 struct ethtool_regs regs;
@@ -324,7 +665,7 @@ static int ethtool_reset(struct net_device *dev, char __user *useraddr)
324 665
325static int ethtool_get_wol(struct net_device *dev, char __user *useraddr) 666static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
326{ 667{
327 struct ethtool_wolinfo wol = { ETHTOOL_GWOL }; 668 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
328 669
329 if (!dev->ethtool_ops->get_wol) 670 if (!dev->ethtool_ops->get_wol)
330 return -EOPNOTSUPP; 671 return -EOPNOTSUPP;
@@ -456,9 +797,12 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
456 return ret; 797 return ret;
457} 798}
458 799
459static int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr) 800/*
801 * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
802 */
803static noinline int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
460{ 804{
461 struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE }; 805 struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE };
462 806
463 if (!dev->ethtool_ops->get_coalesce) 807 if (!dev->ethtool_ops->get_coalesce)
464 return -EOPNOTSUPP; 808 return -EOPNOTSUPP;
@@ -470,7 +814,10 @@ static int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
470 return 0; 814 return 0;
471} 815}
472 816
473static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr) 817/*
818 * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
819 */
820static noinline int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
474{ 821{
475 struct ethtool_coalesce coalesce; 822 struct ethtool_coalesce coalesce;
476 823
@@ -485,7 +832,7 @@ static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
485 832
486static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr) 833static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
487{ 834{
488 struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM }; 835 struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM };
489 836
490 if (!dev->ethtool_ops->get_ringparam) 837 if (!dev->ethtool_ops->get_ringparam)
491 return -EOPNOTSUPP; 838 return -EOPNOTSUPP;
@@ -839,7 +1186,7 @@ static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr)
839static int ethtool_get_value(struct net_device *dev, char __user *useraddr, 1186static int ethtool_get_value(struct net_device *dev, char __user *useraddr,
840 u32 cmd, u32 (*actor)(struct net_device *)) 1187 u32 cmd, u32 (*actor)(struct net_device *))
841{ 1188{
842 struct ethtool_value edata = { cmd }; 1189 struct ethtool_value edata = { .cmd = cmd };
843 1190
844 if (!actor) 1191 if (!actor)
845 return -EOPNOTSUPP; 1192 return -EOPNOTSUPP;
@@ -880,7 +1227,10 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
880 return actor(dev, edata.data); 1227 return actor(dev, edata.data);
881} 1228}
882 1229
883static int ethtool_flash_device(struct net_device *dev, char __user *useraddr) 1230/*
1231 * noinline attribute so that gcc doesnt use too much stack in dev_ethtool()
1232 */
1233static noinline int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
884{ 1234{
885 struct ethtool_flash efl; 1235 struct ethtool_flash efl;
886 1236
@@ -1113,6 +1463,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1113 case ETHTOOL_RESET: 1463 case ETHTOOL_RESET:
1114 rc = ethtool_reset(dev, useraddr); 1464 rc = ethtool_reset(dev, useraddr);
1115 break; 1465 break;
1466 case ETHTOOL_SRXNTUPLE:
1467 rc = ethtool_set_rx_ntuple(dev, useraddr);
1468 break;
1469 case ETHTOOL_GRXNTUPLE:
1470 rc = ethtool_get_rx_ntuple(dev, useraddr);
1471 break;
1116 default: 1472 default:
1117 rc = -EOPNOTSUPP; 1473 rc = -EOPNOTSUPP;
1118 } 1474 }
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 02a3b2c69c1..9a24377146b 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -708,7 +708,7 @@ static struct notifier_block fib_rules_notifier = {
708 .notifier_call = fib_rules_event, 708 .notifier_call = fib_rules_event,
709}; 709};
710 710
711static int fib_rules_net_init(struct net *net) 711static int __net_init fib_rules_net_init(struct net *net)
712{ 712{
713 INIT_LIST_HEAD(&net->rules_ops); 713 INIT_LIST_HEAD(&net->rules_ops);
714 spin_lock_init(&net->rules_mod_lock); 714 spin_lock_init(&net->rules_mod_lock);
diff --git a/net/core/filter.c b/net/core/filter.c
index 08db7b9143a..7517110ff4a 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -529,6 +529,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
529 sk_filter_delayed_uncharge(sk, old_fp); 529 sk_filter_delayed_uncharge(sk, old_fp);
530 return 0; 530 return 0;
531} 531}
532EXPORT_SYMBOL_GPL(sk_attach_filter);
532 533
533int sk_detach_filter(struct sock *sk) 534int sk_detach_filter(struct sock *sk)
534{ 535{
@@ -545,3 +546,4 @@ int sk_detach_filter(struct sock *sk)
545 rcu_read_unlock_bh(); 546 rcu_read_unlock_bh();
546 return ret; 547 return ret;
547} 548}
549EXPORT_SYMBOL_GPL(sk_detach_filter);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f35377b643e..d102f6d9abd 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2417,8 +2417,7 @@ EXPORT_SYMBOL(neigh_seq_stop);
2417 2417
2418static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos) 2418static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2419{ 2419{
2420 struct proc_dir_entry *pde = seq->private; 2420 struct neigh_table *tbl = seq->private;
2421 struct neigh_table *tbl = pde->data;
2422 int cpu; 2421 int cpu;
2423 2422
2424 if (*pos == 0) 2423 if (*pos == 0)
@@ -2435,8 +2434,7 @@ static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2435 2434
2436static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2435static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2437{ 2436{
2438 struct proc_dir_entry *pde = seq->private; 2437 struct neigh_table *tbl = seq->private;
2439 struct neigh_table *tbl = pde->data;
2440 int cpu; 2438 int cpu;
2441 2439
2442 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { 2440 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
@@ -2455,8 +2453,7 @@ static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2455 2453
2456static int neigh_stat_seq_show(struct seq_file *seq, void *v) 2454static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2457{ 2455{
2458 struct proc_dir_entry *pde = seq->private; 2456 struct neigh_table *tbl = seq->private;
2459 struct neigh_table *tbl = pde->data;
2460 struct neigh_statistics *st = v; 2457 struct neigh_statistics *st = v;
2461 2458
2462 if (v == SEQ_START_TOKEN) { 2459 if (v == SEQ_START_TOKEN) {
@@ -2501,7 +2498,7 @@ static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2501 2498
2502 if (!ret) { 2499 if (!ret) {
2503 struct seq_file *sf = file->private_data; 2500 struct seq_file *sf = file->private_data;
2504 sf->private = PDE(inode); 2501 sf->private = PDE(inode)->data;
2505 } 2502 }
2506 return ret; 2503 return ret;
2507}; 2504};
@@ -2559,9 +2556,11 @@ EXPORT_SYMBOL(neigh_app_ns);
2559 2556
2560#ifdef CONFIG_SYSCTL 2557#ifdef CONFIG_SYSCTL
2561 2558
2559#define NEIGH_VARS_MAX 19
2560
2562static struct neigh_sysctl_table { 2561static struct neigh_sysctl_table {
2563 struct ctl_table_header *sysctl_header; 2562 struct ctl_table_header *sysctl_header;
2564 struct ctl_table neigh_vars[__NET_NEIGH_MAX]; 2563 struct ctl_table neigh_vars[NEIGH_VARS_MAX];
2565 char *dev_name; 2564 char *dev_name;
2566} neigh_sysctl_template __read_mostly = { 2565} neigh_sysctl_template __read_mostly = {
2567 .neigh_vars = { 2566 .neigh_vars = {
@@ -2678,8 +2677,7 @@ static struct neigh_sysctl_table {
2678}; 2677};
2679 2678
2680int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, 2679int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2681 int p_id, int pdev_id, char *p_name, 2680 char *p_name, proc_handler *handler)
2682 proc_handler *handler)
2683{ 2681{
2684 struct neigh_sysctl_table *t; 2682 struct neigh_sysctl_table *t;
2685 const char *dev_name_source = NULL; 2683 const char *dev_name_source = NULL;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 0b4d0d35ef4..7aa69725376 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -407,11 +407,24 @@ static void arp_reply(struct sk_buff *skb)
407 __be32 sip, tip; 407 __be32 sip, tip;
408 unsigned char *sha; 408 unsigned char *sha;
409 struct sk_buff *send_skb; 409 struct sk_buff *send_skb;
410 struct netpoll *np = NULL; 410 struct netpoll *np, *tmp;
411 unsigned long flags;
412 int hits = 0;
413
414 if (list_empty(&npinfo->rx_np))
415 return;
416
417 /* Before checking the packet, we do some early
418 inspection whether this is interesting at all */
419 spin_lock_irqsave(&npinfo->rx_lock, flags);
420 list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
421 if (np->dev == skb->dev)
422 hits++;
423 }
424 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
411 425
412 if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev) 426 /* No netpoll struct is using this dev */
413 np = npinfo->rx_np; 427 if (!hits)
414 if (!np)
415 return; 428 return;
416 429
417 /* No arp on this interface */ 430 /* No arp on this interface */
@@ -437,77 +450,91 @@ static void arp_reply(struct sk_buff *skb)
437 arp_ptr += skb->dev->addr_len; 450 arp_ptr += skb->dev->addr_len;
438 memcpy(&sip, arp_ptr, 4); 451 memcpy(&sip, arp_ptr, 4);
439 arp_ptr += 4; 452 arp_ptr += 4;
440 /* if we actually cared about dst hw addr, it would get copied here */ 453 /* If we actually cared about dst hw addr,
454 it would get copied here */
441 arp_ptr += skb->dev->addr_len; 455 arp_ptr += skb->dev->addr_len;
442 memcpy(&tip, arp_ptr, 4); 456 memcpy(&tip, arp_ptr, 4);
443 457
444 /* Should we ignore arp? */ 458 /* Should we ignore arp? */
445 if (tip != np->local_ip || 459 if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
446 ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
447 return; 460 return;
448 461
449 size = arp_hdr_len(skb->dev); 462 size = arp_hdr_len(skb->dev);
450 send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
451 LL_RESERVED_SPACE(np->dev));
452 463
453 if (!send_skb) 464 spin_lock_irqsave(&npinfo->rx_lock, flags);
454 return; 465 list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
455 466 if (tip != np->local_ip)
456 skb_reset_network_header(send_skb); 467 continue;
457 arp = (struct arphdr *) skb_put(send_skb, size);
458 send_skb->dev = skb->dev;
459 send_skb->protocol = htons(ETH_P_ARP);
460 468
461 /* Fill the device header for the ARP frame */ 469 send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
462 if (dev_hard_header(send_skb, skb->dev, ptype, 470 LL_RESERVED_SPACE(np->dev));
463 sha, np->dev->dev_addr, 471 if (!send_skb)
464 send_skb->len) < 0) { 472 continue;
465 kfree_skb(send_skb);
466 return;
467 }
468 473
469 /* 474 skb_reset_network_header(send_skb);
470 * Fill out the arp protocol part. 475 arp = (struct arphdr *) skb_put(send_skb, size);
471 * 476 send_skb->dev = skb->dev;
472 * we only support ethernet device type, 477 send_skb->protocol = htons(ETH_P_ARP);
473 * which (according to RFC 1390) should always equal 1 (Ethernet).
474 */
475 478
476 arp->ar_hrd = htons(np->dev->type); 479 /* Fill the device header for the ARP frame */
477 arp->ar_pro = htons(ETH_P_IP); 480 if (dev_hard_header(send_skb, skb->dev, ptype,
478 arp->ar_hln = np->dev->addr_len; 481 sha, np->dev->dev_addr,
479 arp->ar_pln = 4; 482 send_skb->len) < 0) {
480 arp->ar_op = htons(type); 483 kfree_skb(send_skb);
484 continue;
485 }
481 486
482 arp_ptr=(unsigned char *)(arp + 1); 487 /*
483 memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len); 488 * Fill out the arp protocol part.
484 arp_ptr += np->dev->addr_len; 489 *
485 memcpy(arp_ptr, &tip, 4); 490 * we only support ethernet device type,
486 arp_ptr += 4; 491 * which (according to RFC 1390) should
487 memcpy(arp_ptr, sha, np->dev->addr_len); 492 * always equal 1 (Ethernet).
488 arp_ptr += np->dev->addr_len; 493 */
489 memcpy(arp_ptr, &sip, 4);
490 494
491 netpoll_send_skb(np, send_skb); 495 arp->ar_hrd = htons(np->dev->type);
496 arp->ar_pro = htons(ETH_P_IP);
497 arp->ar_hln = np->dev->addr_len;
498 arp->ar_pln = 4;
499 arp->ar_op = htons(type);
500
501 arp_ptr = (unsigned char *)(arp + 1);
502 memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
503 arp_ptr += np->dev->addr_len;
504 memcpy(arp_ptr, &tip, 4);
505 arp_ptr += 4;
506 memcpy(arp_ptr, sha, np->dev->addr_len);
507 arp_ptr += np->dev->addr_len;
508 memcpy(arp_ptr, &sip, 4);
509
510 netpoll_send_skb(np, send_skb);
511
512 /* If there are several rx_hooks for the same address,
513 we're fine by sending a single reply */
514 break;
515 }
516 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
492} 517}
493 518
494int __netpoll_rx(struct sk_buff *skb) 519int __netpoll_rx(struct sk_buff *skb)
495{ 520{
496 int proto, len, ulen; 521 int proto, len, ulen;
522 int hits = 0;
497 struct iphdr *iph; 523 struct iphdr *iph;
498 struct udphdr *uh; 524 struct udphdr *uh;
499 struct netpoll_info *npi = skb->dev->npinfo; 525 struct netpoll_info *npinfo = skb->dev->npinfo;
500 struct netpoll *np = npi->rx_np; 526 struct netpoll *np, *tmp;
501 527
502 if (!np) 528 if (list_empty(&npinfo->rx_np))
503 goto out; 529 goto out;
530
504 if (skb->dev->type != ARPHRD_ETHER) 531 if (skb->dev->type != ARPHRD_ETHER)
505 goto out; 532 goto out;
506 533
507 /* check if netpoll clients need ARP */ 534 /* check if netpoll clients need ARP */
508 if (skb->protocol == htons(ETH_P_ARP) && 535 if (skb->protocol == htons(ETH_P_ARP) &&
509 atomic_read(&trapped)) { 536 atomic_read(&trapped)) {
510 skb_queue_tail(&npi->arp_tx, skb); 537 skb_queue_tail(&npinfo->arp_tx, skb);
511 return 1; 538 return 1;
512 } 539 }
513 540
@@ -551,16 +578,23 @@ int __netpoll_rx(struct sk_buff *skb)
551 goto out; 578 goto out;
552 if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr)) 579 if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
553 goto out; 580 goto out;
554 if (np->local_ip && np->local_ip != iph->daddr)
555 goto out;
556 if (np->remote_ip && np->remote_ip != iph->saddr)
557 goto out;
558 if (np->local_port && np->local_port != ntohs(uh->dest))
559 goto out;
560 581
561 np->rx_hook(np, ntohs(uh->source), 582 list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
562 (char *)(uh+1), 583 if (np->local_ip && np->local_ip != iph->daddr)
563 ulen - sizeof(struct udphdr)); 584 continue;
585 if (np->remote_ip && np->remote_ip != iph->saddr)
586 continue;
587 if (np->local_port && np->local_port != ntohs(uh->dest))
588 continue;
589
590 np->rx_hook(np, ntohs(uh->source),
591 (char *)(uh+1),
592 ulen - sizeof(struct udphdr));
593 hits++;
594 }
595
596 if (!hits)
597 goto out;
564 598
565 kfree_skb(skb); 599 kfree_skb(skb);
566 return 1; 600 return 1;
@@ -684,6 +718,7 @@ int netpoll_setup(struct netpoll *np)
684 struct net_device *ndev = NULL; 718 struct net_device *ndev = NULL;
685 struct in_device *in_dev; 719 struct in_device *in_dev;
686 struct netpoll_info *npinfo; 720 struct netpoll_info *npinfo;
721 struct netpoll *npe, *tmp;
687 unsigned long flags; 722 unsigned long flags;
688 int err; 723 int err;
689 724
@@ -704,7 +739,7 @@ int netpoll_setup(struct netpoll *np)
704 } 739 }
705 740
706 npinfo->rx_flags = 0; 741 npinfo->rx_flags = 0;
707 npinfo->rx_np = NULL; 742 INIT_LIST_HEAD(&npinfo->rx_np);
708 743
709 spin_lock_init(&npinfo->rx_lock); 744 spin_lock_init(&npinfo->rx_lock);
710 skb_queue_head_init(&npinfo->arp_tx); 745 skb_queue_head_init(&npinfo->arp_tx);
@@ -785,7 +820,7 @@ int netpoll_setup(struct netpoll *np)
785 if (np->rx_hook) { 820 if (np->rx_hook) {
786 spin_lock_irqsave(&npinfo->rx_lock, flags); 821 spin_lock_irqsave(&npinfo->rx_lock, flags);
787 npinfo->rx_flags |= NETPOLL_RX_ENABLED; 822 npinfo->rx_flags |= NETPOLL_RX_ENABLED;
788 npinfo->rx_np = np; 823 list_add_tail(&np->rx, &npinfo->rx_np);
789 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 824 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
790 } 825 }
791 826
@@ -801,9 +836,16 @@ int netpoll_setup(struct netpoll *np)
801 return 0; 836 return 0;
802 837
803 release: 838 release:
804 if (!ndev->npinfo) 839 if (!ndev->npinfo) {
840 spin_lock_irqsave(&npinfo->rx_lock, flags);
841 list_for_each_entry_safe(npe, tmp, &npinfo->rx_np, rx) {
842 npe->dev = NULL;
843 }
844 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
845
805 kfree(npinfo); 846 kfree(npinfo);
806 np->dev = NULL; 847 }
848
807 dev_put(ndev); 849 dev_put(ndev);
808 return err; 850 return err;
809} 851}
@@ -823,10 +865,11 @@ void netpoll_cleanup(struct netpoll *np)
823 if (np->dev) { 865 if (np->dev) {
824 npinfo = np->dev->npinfo; 866 npinfo = np->dev->npinfo;
825 if (npinfo) { 867 if (npinfo) {
826 if (npinfo->rx_np == np) { 868 if (!list_empty(&npinfo->rx_np)) {
827 spin_lock_irqsave(&npinfo->rx_lock, flags); 869 spin_lock_irqsave(&npinfo->rx_lock, flags);
828 npinfo->rx_np = NULL; 870 list_del(&np->rx);
829 npinfo->rx_flags &= ~NETPOLL_RX_ENABLED; 871 if (list_empty(&npinfo->rx_np))
872 npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
830 spin_unlock_irqrestore(&npinfo->rx_lock, flags); 873 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
831 } 874 }
832 875
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2e692afdc55..43923811bd6 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2188,12 +2188,13 @@ static inline int f_pick(struct pktgen_dev *pkt_dev)
2188/* If there was already an IPSEC SA, we keep it as is, else 2188/* If there was already an IPSEC SA, we keep it as is, else
2189 * we go look for it ... 2189 * we go look for it ...
2190*/ 2190*/
2191#define DUMMY_MARK 0
2191static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow) 2192static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
2192{ 2193{
2193 struct xfrm_state *x = pkt_dev->flows[flow].x; 2194 struct xfrm_state *x = pkt_dev->flows[flow].x;
2194 if (!x) { 2195 if (!x) {
2195 /*slow path: we dont already have xfrm_state*/ 2196 /*slow path: we dont already have xfrm_state*/
2196 x = xfrm_stateonly_find(&init_net, 2197 x = xfrm_stateonly_find(&init_net, DUMMY_MARK,
2197 (xfrm_address_t *)&pkt_dev->cur_daddr, 2198 (xfrm_address_t *)&pkt_dev->cur_daddr,
2198 (xfrm_address_t *)&pkt_dev->cur_saddr, 2199 (xfrm_address_t *)&pkt_dev->cur_saddr,
2199 AF_INET, 2200 AF_INET,
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 794bcb897ff..42da96a4eee 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -35,6 +35,7 @@
35#include <linux/security.h> 35#include <linux/security.h>
36#include <linux/mutex.h> 36#include <linux/mutex.h>
37#include <linux/if_addr.h> 37#include <linux/if_addr.h>
38#include <linux/pci.h>
38 39
39#include <asm/uaccess.h> 40#include <asm/uaccess.h>
40#include <asm/system.h> 41#include <asm/system.h>
@@ -580,6 +581,15 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
580 a->tx_compressed = b->tx_compressed; 581 a->tx_compressed = b->tx_compressed;
581}; 582};
582 583
584static inline int rtnl_vfinfo_size(const struct net_device *dev)
585{
586 if (dev->dev.parent && dev_is_pci(dev->dev.parent))
587 return dev_num_vf(dev->dev.parent) *
588 sizeof(struct ifla_vf_info);
589 else
590 return 0;
591}
592
583static inline size_t if_nlmsg_size(const struct net_device *dev) 593static inline size_t if_nlmsg_size(const struct net_device *dev)
584{ 594{
585 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 595 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
@@ -597,6 +607,8 @@ static inline size_t if_nlmsg_size(const struct net_device *dev)
597 + nla_total_size(4) /* IFLA_MASTER */ 607 + nla_total_size(4) /* IFLA_MASTER */
598 + nla_total_size(1) /* IFLA_OPERSTATE */ 608 + nla_total_size(1) /* IFLA_OPERSTATE */
599 + nla_total_size(1) /* IFLA_LINKMODE */ 609 + nla_total_size(1) /* IFLA_LINKMODE */
610 + nla_total_size(4) /* IFLA_NUM_VF */
611 + nla_total_size(rtnl_vfinfo_size(dev)) /* IFLA_VFINFO */
600 + rtnl_link_get_size(dev); /* IFLA_LINKINFO */ 612 + rtnl_link_get_size(dev); /* IFLA_LINKINFO */
601} 613}
602 614
@@ -665,6 +677,17 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
665 stats = dev_get_stats(dev); 677 stats = dev_get_stats(dev);
666 copy_rtnl_link_stats(nla_data(attr), stats); 678 copy_rtnl_link_stats(nla_data(attr), stats);
667 679
680 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
681 int i;
682 struct ifla_vf_info ivi;
683
684 NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
685 for (i = 0; i < dev_num_vf(dev->dev.parent); i++) {
686 if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
687 break;
688 NLA_PUT(skb, IFLA_VFINFO, sizeof(ivi), &ivi);
689 }
690 }
668 if (dev->rtnl_link_ops) { 691 if (dev->rtnl_link_ops) {
669 if (rtnl_link_fill(skb, dev) < 0) 692 if (rtnl_link_fill(skb, dev) < 0)
670 goto nla_put_failure; 693 goto nla_put_failure;
@@ -725,6 +748,12 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
725 [IFLA_LINKINFO] = { .type = NLA_NESTED }, 748 [IFLA_LINKINFO] = { .type = NLA_NESTED },
726 [IFLA_NET_NS_PID] = { .type = NLA_U32 }, 749 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
727 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, 750 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
751 [IFLA_VF_MAC] = { .type = NLA_BINARY,
752 .len = sizeof(struct ifla_vf_mac) },
753 [IFLA_VF_VLAN] = { .type = NLA_BINARY,
754 .len = sizeof(struct ifla_vf_vlan) },
755 [IFLA_VF_TX_RATE] = { .type = NLA_BINARY,
756 .len = sizeof(struct ifla_vf_tx_rate) },
728}; 757};
729EXPORT_SYMBOL(ifla_policy); 758EXPORT_SYMBOL(ifla_policy);
730 759
@@ -898,6 +927,44 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
898 write_unlock_bh(&dev_base_lock); 927 write_unlock_bh(&dev_base_lock);
899 } 928 }
900 929
930 if (tb[IFLA_VF_MAC]) {
931 struct ifla_vf_mac *ivm;
932 ivm = nla_data(tb[IFLA_VF_MAC]);
933 write_lock_bh(&dev_base_lock);
934 if (ops->ndo_set_vf_mac)
935 err = ops->ndo_set_vf_mac(dev, ivm->vf, ivm->mac);
936 write_unlock_bh(&dev_base_lock);
937 if (err < 0)
938 goto errout;
939 modified = 1;
940 }
941
942 if (tb[IFLA_VF_VLAN]) {
943 struct ifla_vf_vlan *ivv;
944 ivv = nla_data(tb[IFLA_VF_VLAN]);
945 write_lock_bh(&dev_base_lock);
946 if (ops->ndo_set_vf_vlan)
947 err = ops->ndo_set_vf_vlan(dev, ivv->vf,
948 (u16)ivv->vlan,
949 (u8)ivv->qos);
950 write_unlock_bh(&dev_base_lock);
951 if (err < 0)
952 goto errout;
953 modified = 1;
954 }
955 err = 0;
956
957 if (tb[IFLA_VF_TX_RATE]) {
958 struct ifla_vf_tx_rate *ivt;
959 ivt = nla_data(tb[IFLA_VF_TX_RATE]);
960 write_lock_bh(&dev_base_lock);
961 if (ops->ndo_set_vf_tx_rate)
962 err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, ivt->rate);
963 write_unlock_bh(&dev_base_lock);
964 if (err < 0)
965 goto errout;
966 modified = 1;
967 }
901 err = 0; 968 err = 0;
902 969
903errout: 970errout:
@@ -1386,7 +1453,7 @@ static struct notifier_block rtnetlink_dev_notifier = {
1386}; 1453};
1387 1454
1388 1455
1389static int rtnetlink_net_init(struct net *net) 1456static int __net_init rtnetlink_net_init(struct net *net)
1390{ 1457{
1391 struct sock *sk; 1458 struct sock *sk;
1392 sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX, 1459 sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX,
@@ -1397,7 +1464,7 @@ static int rtnetlink_net_init(struct net *net)
1397 return 0; 1464 return 0;
1398} 1465}
1399 1466
1400static void rtnetlink_net_exit(struct net *net) 1467static void __net_exit rtnetlink_net_exit(struct net *net)
1401{ 1468{
1402 netlink_kernel_release(net->rtnl); 1469 netlink_kernel_release(net->rtnl);
1403 net->rtnl = NULL; 1470 net->rtnl = NULL;
diff --git a/net/core/sock.c b/net/core/sock.c
index e1f6f225f01..472a59f205b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -741,7 +741,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
741 struct timeval tm; 741 struct timeval tm;
742 } v; 742 } v;
743 743
744 unsigned int lv = sizeof(int); 744 int lv = sizeof(int);
745 int len; 745 int len;
746 746
747 if (get_user(len, optlen)) 747 if (get_user(len, optlen))
@@ -2140,13 +2140,13 @@ int sock_prot_inuse_get(struct net *net, struct proto *prot)
2140} 2140}
2141EXPORT_SYMBOL_GPL(sock_prot_inuse_get); 2141EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2142 2142
2143static int sock_inuse_init_net(struct net *net) 2143static int __net_init sock_inuse_init_net(struct net *net)
2144{ 2144{
2145 net->core.inuse = alloc_percpu(struct prot_inuse); 2145 net->core.inuse = alloc_percpu(struct prot_inuse);
2146 return net->core.inuse ? 0 : -ENOMEM; 2146 return net->core.inuse ? 0 : -ENOMEM;
2147} 2147}
2148 2148
2149static void sock_inuse_exit_net(struct net *net) 2149static void __net_exit sock_inuse_exit_net(struct net *net)
2150{ 2150{
2151 free_percpu(net->core.inuse); 2151 free_percpu(net->core.inuse);
2152} 2152}
@@ -2228,13 +2228,10 @@ int proto_register(struct proto *prot, int alloc_slab)
2228 } 2228 }
2229 2229
2230 if (prot->rsk_prot != NULL) { 2230 if (prot->rsk_prot != NULL) {
2231 static const char mask[] = "request_sock_%s"; 2231 prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
2232
2233 prot->rsk_prot->slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
2234 if (prot->rsk_prot->slab_name == NULL) 2232 if (prot->rsk_prot->slab_name == NULL)
2235 goto out_free_sock_slab; 2233 goto out_free_sock_slab;
2236 2234
2237 sprintf(prot->rsk_prot->slab_name, mask, prot->name);
2238 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name, 2235 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
2239 prot->rsk_prot->obj_size, 0, 2236 prot->rsk_prot->obj_size, 0,
2240 SLAB_HWCACHE_ALIGN, NULL); 2237 SLAB_HWCACHE_ALIGN, NULL);
@@ -2247,14 +2244,11 @@ int proto_register(struct proto *prot, int alloc_slab)
2247 } 2244 }
2248 2245
2249 if (prot->twsk_prot != NULL) { 2246 if (prot->twsk_prot != NULL) {
2250 static const char mask[] = "tw_sock_%s"; 2247 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2251
2252 prot->twsk_prot->twsk_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
2253 2248
2254 if (prot->twsk_prot->twsk_slab_name == NULL) 2249 if (prot->twsk_prot->twsk_slab_name == NULL)
2255 goto out_free_request_sock_slab; 2250 goto out_free_request_sock_slab;
2256 2251
2257 sprintf(prot->twsk_prot->twsk_slab_name, mask, prot->name);
2258 prot->twsk_prot->twsk_slab = 2252 prot->twsk_prot->twsk_slab =
2259 kmem_cache_create(prot->twsk_prot->twsk_slab_name, 2253 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2260 prot->twsk_prot->twsk_obj_size, 2254 prot->twsk_prot->twsk_obj_size,
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index db9f5b39388..813e399220a 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -54,7 +54,7 @@ MODULE_LICENSE("GPL");
54/**************** DCB attribute policies *************************************/ 54/**************** DCB attribute policies *************************************/
55 55
56/* DCB netlink attributes policy */ 56/* DCB netlink attributes policy */
57static struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = { 57static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
58 [DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1}, 58 [DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
59 [DCB_ATTR_STATE] = {.type = NLA_U8}, 59 [DCB_ATTR_STATE] = {.type = NLA_U8},
60 [DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED}, 60 [DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
@@ -68,7 +68,7 @@ static struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
68}; 68};
69 69
70/* DCB priority flow control to User Priority nested attributes */ 70/* DCB priority flow control to User Priority nested attributes */
71static struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = { 71static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
72 [DCB_PFC_UP_ATTR_0] = {.type = NLA_U8}, 72 [DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
73 [DCB_PFC_UP_ATTR_1] = {.type = NLA_U8}, 73 [DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
74 [DCB_PFC_UP_ATTR_2] = {.type = NLA_U8}, 74 [DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
@@ -81,7 +81,7 @@ static struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
81}; 81};
82 82
83/* DCB priority grouping nested attributes */ 83/* DCB priority grouping nested attributes */
84static struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = { 84static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
85 [DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED}, 85 [DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
86 [DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED}, 86 [DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
87 [DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED}, 87 [DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
@@ -103,7 +103,7 @@ static struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
103}; 103};
104 104
105/* DCB traffic class nested attributes. */ 105/* DCB traffic class nested attributes. */
106static struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = { 106static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
107 [DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8}, 107 [DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
108 [DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8}, 108 [DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
109 [DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8}, 109 [DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
@@ -112,7 +112,7 @@ static struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
112}; 112};
113 113
114/* DCB capabilities nested attributes. */ 114/* DCB capabilities nested attributes. */
115static struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = { 115static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
116 [DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG}, 116 [DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
117 [DCB_CAP_ATTR_PG] = {.type = NLA_U8}, 117 [DCB_CAP_ATTR_PG] = {.type = NLA_U8},
118 [DCB_CAP_ATTR_PFC] = {.type = NLA_U8}, 118 [DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
@@ -124,14 +124,14 @@ static struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
124}; 124};
125 125
126/* DCB capabilities nested attributes. */ 126/* DCB capabilities nested attributes. */
127static struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = { 127static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
128 [DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG}, 128 [DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
129 [DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8}, 129 [DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
130 [DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8}, 130 [DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
131}; 131};
132 132
133/* DCB BCN nested attributes. */ 133/* DCB BCN nested attributes. */
134static struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = { 134static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
135 [DCB_BCN_ATTR_RP_0] = {.type = NLA_U8}, 135 [DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
136 [DCB_BCN_ATTR_RP_1] = {.type = NLA_U8}, 136 [DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
137 [DCB_BCN_ATTR_RP_2] = {.type = NLA_U8}, 137 [DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
@@ -160,7 +160,7 @@ static struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
160}; 160};
161 161
162/* DCB APP nested attributes. */ 162/* DCB APP nested attributes. */
163static struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = { 163static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
164 [DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8}, 164 [DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
165 [DCB_APP_ATTR_ID] = {.type = NLA_U16}, 165 [DCB_APP_ATTR_ID] = {.type = NLA_U16},
166 [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8}, 166 [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index ff16e9df196..49d27c556be 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -63,14 +63,13 @@ int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
63 u8 *ccid_array, array_len; 63 u8 *ccid_array, array_len;
64 int err = 0; 64 int err = 0;
65 65
66 if (len < ARRAY_SIZE(ccids))
67 return -EINVAL;
68
69 if (ccid_get_builtin_ccids(&ccid_array, &array_len)) 66 if (ccid_get_builtin_ccids(&ccid_array, &array_len))
70 return -ENOBUFS; 67 return -ENOBUFS;
71 68
72 if (put_user(array_len, optlen) || 69 if (put_user(array_len, optlen))
73 copy_to_user(optval, ccid_array, array_len)) 70 err = -EFAULT;
71 else if (len > 0 && copy_to_user(optval, ccid_array,
72 len > array_len ? array_len : len))
74 err = -EFAULT; 73 err = -EFAULT;
75 74
76 kfree(ccid_array); 75 kfree(ccid_array);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index dad7bc4878e..b195c4feaa0 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -996,7 +996,7 @@ static struct inet_protosw dccp_v4_protosw = {
996 .flags = INET_PROTOSW_ICSK, 996 .flags = INET_PROTOSW_ICSK,
997}; 997};
998 998
999static int dccp_v4_init_net(struct net *net) 999static int __net_init dccp_v4_init_net(struct net *net)
1000{ 1000{
1001 int err; 1001 int err;
1002 1002
@@ -1005,7 +1005,7 @@ static int dccp_v4_init_net(struct net *net)
1005 return err; 1005 return err;
1006} 1006}
1007 1007
1008static void dccp_v4_exit_net(struct net *net) 1008static void __net_exit dccp_v4_exit_net(struct net *net)
1009{ 1009{
1010 inet_ctl_sock_destroy(net->dccp.v4_ctl_sk); 1010 inet_ctl_sock_destroy(net->dccp.v4_ctl_sk);
1011} 1011}
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index baf05cf43c2..1aec6349e85 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -1189,7 +1189,7 @@ static struct inet_protosw dccp_v6_protosw = {
1189 .flags = INET_PROTOSW_ICSK, 1189 .flags = INET_PROTOSW_ICSK,
1190}; 1190};
1191 1191
1192static int dccp_v6_init_net(struct net *net) 1192static int __net_init dccp_v6_init_net(struct net *net)
1193{ 1193{
1194 int err; 1194 int err;
1195 1195
@@ -1198,7 +1198,7 @@ static int dccp_v6_init_net(struct net *net)
1198 return err; 1198 return err;
1199} 1199}
1200 1200
1201static void dccp_v6_exit_net(struct net *net) 1201static void __net_exit dccp_v6_exit_net(struct net *net)
1202{ 1202{
1203 inet_ctl_sock_destroy(net->dccp.v6_ctl_sk); 1203 inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
1204} 1204}
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 671cd1413d5..0ef7061920c 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -835,6 +835,8 @@ verify_sock_status:
835 len = -EFAULT; 835 len = -EFAULT;
836 break; 836 break;
837 } 837 }
838 if (flags & MSG_TRUNC)
839 len = skb->len;
838 found_fin_ok: 840 found_fin_ok:
839 if (!(flags & MSG_PEEK)) 841 if (!(flags & MSG_PEEK))
840 sk_eat_skb(sk, skb, 0); 842 sk_eat_skb(sk, skb, 0);
@@ -1003,12 +1005,13 @@ EXPORT_SYMBOL_GPL(dccp_shutdown);
1003 1005
1004static inline int dccp_mib_init(void) 1006static inline int dccp_mib_init(void)
1005{ 1007{
1006 return snmp_mib_init((void**)dccp_statistics, sizeof(struct dccp_mib)); 1008 return snmp_mib_init((void __percpu **)dccp_statistics,
1009 sizeof(struct dccp_mib));
1007} 1010}
1008 1011
1009static inline void dccp_mib_exit(void) 1012static inline void dccp_mib_exit(void)
1010{ 1013{
1011 snmp_mib_free((void**)dccp_statistics); 1014 snmp_mib_free((void __percpu **)dccp_statistics);
1012} 1015}
1013 1016
1014static int thash_entries; 1017static int thash_entries;
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index dd3db88f8f0..205a1c12f3c 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -73,8 +73,8 @@ __setup("ether=", netdev_boot_setup);
73 * @len: packet length (<= skb->len) 73 * @len: packet length (<= skb->len)
74 * 74 *
75 * 75 *
76 * Set the protocol type. For a packet of type ETH_P_802_3 we put the length 76 * Set the protocol type. For a packet of type ETH_P_802_3/2 we put the length
77 * in here instead. It is up to the 802.2 layer to carry protocol information. 77 * in here instead.
78 */ 78 */
79int eth_header(struct sk_buff *skb, struct net_device *dev, 79int eth_header(struct sk_buff *skb, struct net_device *dev,
80 unsigned short type, 80 unsigned short type,
@@ -82,7 +82,7 @@ int eth_header(struct sk_buff *skb, struct net_device *dev,
82{ 82{
83 struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN); 83 struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
84 84
85 if (type != ETH_P_802_3) 85 if (type != ETH_P_802_3 && type != ETH_P_802_2)
86 eth->h_proto = htons(type); 86 eth->h_proto = htons(type);
87 else 87 else
88 eth->h_proto = htons(len); 88 eth->h_proto = htons(len);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 7d12c6a9b19..33b7dffa773 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1385,7 +1385,7 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
1385} 1385}
1386EXPORT_SYMBOL_GPL(inet_ctl_sock_create); 1386EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
1387 1387
1388unsigned long snmp_fold_field(void *mib[], int offt) 1388unsigned long snmp_fold_field(void __percpu *mib[], int offt)
1389{ 1389{
1390 unsigned long res = 0; 1390 unsigned long res = 0;
1391 int i; 1391 int i;
@@ -1398,7 +1398,7 @@ unsigned long snmp_fold_field(void *mib[], int offt)
1398} 1398}
1399EXPORT_SYMBOL_GPL(snmp_fold_field); 1399EXPORT_SYMBOL_GPL(snmp_fold_field);
1400 1400
1401int snmp_mib_init(void *ptr[2], size_t mibsize) 1401int snmp_mib_init(void __percpu *ptr[2], size_t mibsize)
1402{ 1402{
1403 BUG_ON(ptr == NULL); 1403 BUG_ON(ptr == NULL);
1404 ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long)); 1404 ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long));
@@ -1416,7 +1416,7 @@ err0:
1416} 1416}
1417EXPORT_SYMBOL_GPL(snmp_mib_init); 1417EXPORT_SYMBOL_GPL(snmp_mib_init);
1418 1418
1419void snmp_mib_free(void *ptr[2]) 1419void snmp_mib_free(void __percpu *ptr[2])
1420{ 1420{
1421 BUG_ON(ptr == NULL); 1421 BUG_ON(ptr == NULL);
1422 free_percpu(ptr[0]); 1422 free_percpu(ptr[0]);
@@ -1460,25 +1460,25 @@ static const struct net_protocol icmp_protocol = {
1460 1460
1461static __net_init int ipv4_mib_init_net(struct net *net) 1461static __net_init int ipv4_mib_init_net(struct net *net)
1462{ 1462{
1463 if (snmp_mib_init((void **)net->mib.tcp_statistics, 1463 if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics,
1464 sizeof(struct tcp_mib)) < 0) 1464 sizeof(struct tcp_mib)) < 0)
1465 goto err_tcp_mib; 1465 goto err_tcp_mib;
1466 if (snmp_mib_init((void **)net->mib.ip_statistics, 1466 if (snmp_mib_init((void __percpu **)net->mib.ip_statistics,
1467 sizeof(struct ipstats_mib)) < 0) 1467 sizeof(struct ipstats_mib)) < 0)
1468 goto err_ip_mib; 1468 goto err_ip_mib;
1469 if (snmp_mib_init((void **)net->mib.net_statistics, 1469 if (snmp_mib_init((void __percpu **)net->mib.net_statistics,
1470 sizeof(struct linux_mib)) < 0) 1470 sizeof(struct linux_mib)) < 0)
1471 goto err_net_mib; 1471 goto err_net_mib;
1472 if (snmp_mib_init((void **)net->mib.udp_statistics, 1472 if (snmp_mib_init((void __percpu **)net->mib.udp_statistics,
1473 sizeof(struct udp_mib)) < 0) 1473 sizeof(struct udp_mib)) < 0)
1474 goto err_udp_mib; 1474 goto err_udp_mib;
1475 if (snmp_mib_init((void **)net->mib.udplite_statistics, 1475 if (snmp_mib_init((void __percpu **)net->mib.udplite_statistics,
1476 sizeof(struct udp_mib)) < 0) 1476 sizeof(struct udp_mib)) < 0)
1477 goto err_udplite_mib; 1477 goto err_udplite_mib;
1478 if (snmp_mib_init((void **)net->mib.icmp_statistics, 1478 if (snmp_mib_init((void __percpu **)net->mib.icmp_statistics,
1479 sizeof(struct icmp_mib)) < 0) 1479 sizeof(struct icmp_mib)) < 0)
1480 goto err_icmp_mib; 1480 goto err_icmp_mib;
1481 if (snmp_mib_init((void **)net->mib.icmpmsg_statistics, 1481 if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics,
1482 sizeof(struct icmpmsg_mib)) < 0) 1482 sizeof(struct icmpmsg_mib)) < 0)
1483 goto err_icmpmsg_mib; 1483 goto err_icmpmsg_mib;
1484 1484
@@ -1486,30 +1486,30 @@ static __net_init int ipv4_mib_init_net(struct net *net)
1486 return 0; 1486 return 0;
1487 1487
1488err_icmpmsg_mib: 1488err_icmpmsg_mib:
1489 snmp_mib_free((void **)net->mib.icmp_statistics); 1489 snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
1490err_icmp_mib: 1490err_icmp_mib:
1491 snmp_mib_free((void **)net->mib.udplite_statistics); 1491 snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
1492err_udplite_mib: 1492err_udplite_mib:
1493 snmp_mib_free((void **)net->mib.udp_statistics); 1493 snmp_mib_free((void __percpu **)net->mib.udp_statistics);
1494err_udp_mib: 1494err_udp_mib:
1495 snmp_mib_free((void **)net->mib.net_statistics); 1495 snmp_mib_free((void __percpu **)net->mib.net_statistics);
1496err_net_mib: 1496err_net_mib:
1497 snmp_mib_free((void **)net->mib.ip_statistics); 1497 snmp_mib_free((void __percpu **)net->mib.ip_statistics);
1498err_ip_mib: 1498err_ip_mib:
1499 snmp_mib_free((void **)net->mib.tcp_statistics); 1499 snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
1500err_tcp_mib: 1500err_tcp_mib:
1501 return -ENOMEM; 1501 return -ENOMEM;
1502} 1502}
1503 1503
1504static __net_exit void ipv4_mib_exit_net(struct net *net) 1504static __net_exit void ipv4_mib_exit_net(struct net *net)
1505{ 1505{
1506 snmp_mib_free((void **)net->mib.icmpmsg_statistics); 1506 snmp_mib_free((void __percpu **)net->mib.icmpmsg_statistics);
1507 snmp_mib_free((void **)net->mib.icmp_statistics); 1507 snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
1508 snmp_mib_free((void **)net->mib.udplite_statistics); 1508 snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
1509 snmp_mib_free((void **)net->mib.udp_statistics); 1509 snmp_mib_free((void __percpu **)net->mib.udp_statistics);
1510 snmp_mib_free((void **)net->mib.net_statistics); 1510 snmp_mib_free((void __percpu **)net->mib.net_statistics);
1511 snmp_mib_free((void **)net->mib.ip_statistics); 1511 snmp_mib_free((void __percpu **)net->mib.ip_statistics);
1512 snmp_mib_free((void **)net->mib.tcp_statistics); 1512 snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
1513} 1513}
1514 1514
1515static __net_initdata struct pernet_operations ipv4_mib_ops = { 1515static __net_initdata struct pernet_operations ipv4_mib_ops = {
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 7ed3e4ae93a..987b47dc69a 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -393,7 +393,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
393 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) 393 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
394 return; 394 return;
395 395
396 x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET); 396 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET);
397 if (!x) 397 if (!x)
398 return; 398 return;
399 printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n", 399 printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n",
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index c95cd93acf2..c4dd1354280 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -70,6 +70,7 @@
70 * bonding can change the skb before 70 * bonding can change the skb before
71 * sending (e.g. insert 8021q tag). 71 * sending (e.g. insert 8021q tag).
72 * Harald Welte : convert to make use of jenkins hash 72 * Harald Welte : convert to make use of jenkins hash
73 * Jesper D. Brouer: Proxy ARP PVLAN RFC 3069 support.
73 */ 74 */
74 75
75#include <linux/module.h> 76#include <linux/module.h>
@@ -524,12 +525,15 @@ int arp_bind_neighbour(struct dst_entry *dst)
524/* 525/*
525 * Check if we can use proxy ARP for this path 526 * Check if we can use proxy ARP for this path
526 */ 527 */
527 528static inline int arp_fwd_proxy(struct in_device *in_dev,
528static inline int arp_fwd_proxy(struct in_device *in_dev, struct rtable *rt) 529 struct net_device *dev, struct rtable *rt)
529{ 530{
530 struct in_device *out_dev; 531 struct in_device *out_dev;
531 int imi, omi = -1; 532 int imi, omi = -1;
532 533
534 if (rt->u.dst.dev == dev)
535 return 0;
536
533 if (!IN_DEV_PROXY_ARP(in_dev)) 537 if (!IN_DEV_PROXY_ARP(in_dev))
534 return 0; 538 return 0;
535 539
@@ -548,6 +552,43 @@ static inline int arp_fwd_proxy(struct in_device *in_dev, struct rtable *rt)
548} 552}
549 553
550/* 554/*
555 * Check for RFC3069 proxy arp private VLAN (allow to send back to same dev)
556 *
557 * RFC3069 supports proxy arp replies back to the same interface. This
558 * is done to support (ethernet) switch features, like RFC 3069, where
559 * the individual ports are not allowed to communicate with each
560 * other, BUT they are allowed to talk to the upstream router. As
561 * described in RFC 3069, it is possible to allow these hosts to
562 * communicate through the upstream router, by proxy_arp'ing.
563 *
564 * RFC 3069: "VLAN Aggregation for Efficient IP Address Allocation"
565 *
566 * This technology is known by different names:
567 * In RFC 3069 it is called VLAN Aggregation.
568 * Cisco and Allied Telesyn call it Private VLAN.
569 * Hewlett-Packard call it Source-Port filtering or port-isolation.
570 * Ericsson call it MAC-Forced Forwarding (RFC Draft).
571 *
572 */
573static inline int arp_fwd_pvlan(struct in_device *in_dev,
574 struct net_device *dev, struct rtable *rt,
575 __be32 sip, __be32 tip)
576{
577 /* Private VLAN is only concerned about the same ethernet segment */
578 if (rt->u.dst.dev != dev)
579 return 0;
580
581 /* Don't reply on self probes (often done by windowz boxes)*/
582 if (sip == tip)
583 return 0;
584
585 if (IN_DEV_PROXY_ARP_PVLAN(in_dev))
586 return 1;
587 else
588 return 0;
589}
590
591/*
551 * Interface to link layer: send routine and receive handler. 592 * Interface to link layer: send routine and receive handler.
552 */ 593 */
553 594
@@ -833,8 +874,11 @@ static int arp_process(struct sk_buff *skb)
833 } 874 }
834 goto out; 875 goto out;
835 } else if (IN_DEV_FORWARD(in_dev)) { 876 } else if (IN_DEV_FORWARD(in_dev)) {
836 if (addr_type == RTN_UNICAST && rt->u.dst.dev != dev && 877 if (addr_type == RTN_UNICAST &&
837 (arp_fwd_proxy(in_dev, rt) || pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) { 878 (arp_fwd_proxy(in_dev, dev, rt) ||
879 arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
880 pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))
881 {
838 n = neigh_event_ns(&arp_tbl, sha, &sip, dev); 882 n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
839 if (n) 883 if (n)
840 neigh_release(n); 884 neigh_release(n);
@@ -863,7 +907,8 @@ static int arp_process(struct sk_buff *skb)
863 devices (strip is candidate) 907 devices (strip is candidate)
864 */ 908 */
865 if (n == NULL && 909 if (n == NULL &&
866 arp->ar_op == htons(ARPOP_REPLY) && 910 (arp->ar_op == htons(ARPOP_REPLY) ||
911 (arp->ar_op == htons(ARPOP_REQUEST) && tip == sip)) &&
867 inet_addr_type(net, sip) == RTN_UNICAST) 912 inet_addr_type(net, sip) == RTN_UNICAST)
868 n = __neigh_lookup(&arp_tbl, &sip, dev, 1); 913 n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
869 } 914 }
@@ -1239,8 +1284,7 @@ void __init arp_init(void)
1239 dev_add_pack(&arp_packet_type); 1284 dev_add_pack(&arp_packet_type);
1240 arp_proc_init(); 1285 arp_proc_init();
1241#ifdef CONFIG_SYSCTL 1286#ifdef CONFIG_SYSCTL
1242 neigh_sysctl_register(NULL, &arp_tbl.parms, NET_IPV4, 1287 neigh_sysctl_register(NULL, &arp_tbl.parms, "ipv4", NULL);
1243 NET_IPV4_NEIGH, "ipv4", NULL);
1244#endif 1288#endif
1245 register_netdevice_notifier(&arp_netdev_notifier); 1289 register_netdevice_notifier(&arp_netdev_notifier);
1246} 1290}
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 26dec2be961..51ca946e339 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -64,20 +64,20 @@
64 64
65static struct ipv4_devconf ipv4_devconf = { 65static struct ipv4_devconf ipv4_devconf = {
66 .data = { 66 .data = {
67 [NET_IPV4_CONF_ACCEPT_REDIRECTS - 1] = 1, 67 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
68 [NET_IPV4_CONF_SEND_REDIRECTS - 1] = 1, 68 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
69 [NET_IPV4_CONF_SECURE_REDIRECTS - 1] = 1, 69 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
70 [NET_IPV4_CONF_SHARED_MEDIA - 1] = 1, 70 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
71 }, 71 },
72}; 72};
73 73
74static struct ipv4_devconf ipv4_devconf_dflt = { 74static struct ipv4_devconf ipv4_devconf_dflt = {
75 .data = { 75 .data = {
76 [NET_IPV4_CONF_ACCEPT_REDIRECTS - 1] = 1, 76 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
77 [NET_IPV4_CONF_SEND_REDIRECTS - 1] = 1, 77 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
78 [NET_IPV4_CONF_SECURE_REDIRECTS - 1] = 1, 78 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
79 [NET_IPV4_CONF_SHARED_MEDIA - 1] = 1, 79 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
80 [NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE - 1] = 1, 80 [IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
81 }, 81 },
82}; 82};
83 83
@@ -1365,7 +1365,7 @@ int ipv4_doint_and_flush(ctl_table *ctl, int write,
1365 { \ 1365 { \
1366 .procname = name, \ 1366 .procname = name, \
1367 .data = ipv4_devconf.data + \ 1367 .data = ipv4_devconf.data + \
1368 NET_IPV4_CONF_ ## attr - 1, \ 1368 IPV4_DEVCONF_ ## attr - 1, \
1369 .maxlen = sizeof(int), \ 1369 .maxlen = sizeof(int), \
1370 .mode = mval, \ 1370 .mode = mval, \
1371 .proc_handler = proc, \ 1371 .proc_handler = proc, \
@@ -1386,7 +1386,7 @@ int ipv4_doint_and_flush(ctl_table *ctl, int write,
1386 1386
1387static struct devinet_sysctl_table { 1387static struct devinet_sysctl_table {
1388 struct ctl_table_header *sysctl_header; 1388 struct ctl_table_header *sysctl_header;
1389 struct ctl_table devinet_vars[__NET_IPV4_CONF_MAX]; 1389 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
1390 char *dev_name; 1390 char *dev_name;
1391} devinet_sysctl = { 1391} devinet_sysctl = {
1392 .devinet_vars = { 1392 .devinet_vars = {
@@ -1413,6 +1413,7 @@ static struct devinet_sysctl_table {
1413 DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"), 1413 DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
1414 DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"), 1414 DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
1415 DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"), 1415 DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
1416 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
1416 1417
1417 DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"), 1418 DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
1418 DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"), 1419 DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
@@ -1491,8 +1492,7 @@ static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
1491 1492
1492static void devinet_sysctl_register(struct in_device *idev) 1493static void devinet_sysctl_register(struct in_device *idev)
1493{ 1494{
1494 neigh_sysctl_register(idev->dev, idev->arp_parms, NET_IPV4, 1495 neigh_sysctl_register(idev->dev, idev->arp_parms, "ipv4", NULL);
1495 NET_IPV4_NEIGH, "ipv4", NULL);
1496 __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name, 1496 __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
1497 &idev->cnf); 1497 &idev->cnf);
1498} 1498}
@@ -1507,7 +1507,7 @@ static struct ctl_table ctl_forward_entry[] = {
1507 { 1507 {
1508 .procname = "ip_forward", 1508 .procname = "ip_forward",
1509 .data = &ipv4_devconf.data[ 1509 .data = &ipv4_devconf.data[
1510 NET_IPV4_CONF_FORWARDING - 1], 1510 IPV4_DEVCONF_FORWARDING - 1],
1511 .maxlen = sizeof(int), 1511 .maxlen = sizeof(int),
1512 .mode = 0644, 1512 .mode = 0644,
1513 .proc_handler = devinet_sysctl_forward, 1513 .proc_handler = devinet_sysctl_forward,
@@ -1551,7 +1551,7 @@ static __net_init int devinet_init_net(struct net *net)
1551 if (tbl == NULL) 1551 if (tbl == NULL)
1552 goto err_alloc_ctl; 1552 goto err_alloc_ctl;
1553 1553
1554 tbl[0].data = &all->data[NET_IPV4_CONF_FORWARDING - 1]; 1554 tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
1555 tbl[0].extra1 = all; 1555 tbl[0].extra1 = all;
1556 tbl[0].extra2 = net; 1556 tbl[0].extra2 = net;
1557#endif 1557#endif
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 1948895beb6..14ca1f1c3fb 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -422,7 +422,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
422 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) 422 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
423 return; 423 return;
424 424
425 x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET); 425 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET);
426 if (!x) 426 if (!x)
427 return; 427 return;
428 NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n", 428 NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 82dbf711d6d..9b3e28ed524 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -883,7 +883,7 @@ static void nl_fib_input(struct sk_buff *skb)
883 netlink_unicast(net->ipv4.fibnl, skb, pid, MSG_DONTWAIT); 883 netlink_unicast(net->ipv4.fibnl, skb, pid, MSG_DONTWAIT);
884} 884}
885 885
886static int nl_fib_lookup_init(struct net *net) 886static int __net_init nl_fib_lookup_init(struct net *net)
887{ 887{
888 struct sock *sk; 888 struct sock *sk;
889 sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, 0, 889 sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, 0,
@@ -1004,7 +1004,7 @@ fail:
1004 return err; 1004 return err;
1005} 1005}
1006 1006
1007static void __net_exit ip_fib_net_exit(struct net *net) 1007static void ip_fib_net_exit(struct net *net)
1008{ 1008{
1009 unsigned int i; 1009 unsigned int i;
1010 1010
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index ed19aa6919c..1af0ea0fb6a 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -62,8 +62,8 @@ static DEFINE_SPINLOCK(fib_multipath_lock);
62#define for_nexthops(fi) { int nhsel; const struct fib_nh * nh; \ 62#define for_nexthops(fi) { int nhsel; const struct fib_nh * nh; \
63for (nhsel=0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++) 63for (nhsel=0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
64 64
65#define change_nexthops(fi) { int nhsel; struct fib_nh * nh; \ 65#define change_nexthops(fi) { int nhsel; struct fib_nh *nexthop_nh; \
66for (nhsel=0, nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++) 66for (nhsel=0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nexthop_nh++, nhsel++)
67 67
68#else /* CONFIG_IP_ROUTE_MULTIPATH */ 68#else /* CONFIG_IP_ROUTE_MULTIPATH */
69 69
@@ -72,7 +72,7 @@ for (nhsel=0, nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++,
72#define for_nexthops(fi) { int nhsel = 0; const struct fib_nh * nh = (fi)->fib_nh; \ 72#define for_nexthops(fi) { int nhsel = 0; const struct fib_nh * nh = (fi)->fib_nh; \
73for (nhsel=0; nhsel < 1; nhsel++) 73for (nhsel=0; nhsel < 1; nhsel++)
74 74
75#define change_nexthops(fi) { int nhsel = 0; struct fib_nh * nh = (struct fib_nh *)((fi)->fib_nh); \ 75#define change_nexthops(fi) { int nhsel = 0; struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
76for (nhsel=0; nhsel < 1; nhsel++) 76for (nhsel=0; nhsel < 1; nhsel++)
77 77
78#endif /* CONFIG_IP_ROUTE_MULTIPATH */ 78#endif /* CONFIG_IP_ROUTE_MULTIPATH */
@@ -145,9 +145,9 @@ void free_fib_info(struct fib_info *fi)
145 return; 145 return;
146 } 146 }
147 change_nexthops(fi) { 147 change_nexthops(fi) {
148 if (nh->nh_dev) 148 if (nexthop_nh->nh_dev)
149 dev_put(nh->nh_dev); 149 dev_put(nexthop_nh->nh_dev);
150 nh->nh_dev = NULL; 150 nexthop_nh->nh_dev = NULL;
151 } endfor_nexthops(fi); 151 } endfor_nexthops(fi);
152 fib_info_cnt--; 152 fib_info_cnt--;
153 release_net(fi->fib_net); 153 release_net(fi->fib_net);
@@ -162,9 +162,9 @@ void fib_release_info(struct fib_info *fi)
162 if (fi->fib_prefsrc) 162 if (fi->fib_prefsrc)
163 hlist_del(&fi->fib_lhash); 163 hlist_del(&fi->fib_lhash);
164 change_nexthops(fi) { 164 change_nexthops(fi) {
165 if (!nh->nh_dev) 165 if (!nexthop_nh->nh_dev)
166 continue; 166 continue;
167 hlist_del(&nh->nh_hash); 167 hlist_del(&nexthop_nh->nh_hash);
168 } endfor_nexthops(fi) 168 } endfor_nexthops(fi)
169 fi->fib_dead = 1; 169 fi->fib_dead = 1;
170 fib_info_put(fi); 170 fib_info_put(fi);
@@ -395,19 +395,20 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
395 if (!rtnh_ok(rtnh, remaining)) 395 if (!rtnh_ok(rtnh, remaining))
396 return -EINVAL; 396 return -EINVAL;
397 397
398 nh->nh_flags = (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags; 398 nexthop_nh->nh_flags =
399 nh->nh_oif = rtnh->rtnh_ifindex; 399 (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
400 nh->nh_weight = rtnh->rtnh_hops + 1; 400 nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
401 nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;
401 402
402 attrlen = rtnh_attrlen(rtnh); 403 attrlen = rtnh_attrlen(rtnh);
403 if (attrlen > 0) { 404 if (attrlen > 0) {
404 struct nlattr *nla, *attrs = rtnh_attrs(rtnh); 405 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
405 406
406 nla = nla_find(attrs, attrlen, RTA_GATEWAY); 407 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
407 nh->nh_gw = nla ? nla_get_be32(nla) : 0; 408 nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
408#ifdef CONFIG_NET_CLS_ROUTE 409#ifdef CONFIG_NET_CLS_ROUTE
409 nla = nla_find(attrs, attrlen, RTA_FLOW); 410 nla = nla_find(attrs, attrlen, RTA_FLOW);
410 nh->nh_tclassid = nla ? nla_get_u32(nla) : 0; 411 nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
411#endif 412#endif
412 } 413 }
413 414
@@ -527,10 +528,6 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
527 if (nh->nh_gw) { 528 if (nh->nh_gw) {
528 struct fib_result res; 529 struct fib_result res;
529 530
530#ifdef CONFIG_IP_ROUTE_PERVASIVE
531 if (nh->nh_flags&RTNH_F_PERVASIVE)
532 return 0;
533#endif
534 if (nh->nh_flags&RTNH_F_ONLINK) { 531 if (nh->nh_flags&RTNH_F_ONLINK) {
535 struct net_device *dev; 532 struct net_device *dev;
536 533
@@ -738,7 +735,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
738 735
739 fi->fib_nhs = nhs; 736 fi->fib_nhs = nhs;
740 change_nexthops(fi) { 737 change_nexthops(fi) {
741 nh->nh_parent = fi; 738 nexthop_nh->nh_parent = fi;
742 } endfor_nexthops(fi) 739 } endfor_nexthops(fi)
743 740
744 if (cfg->fc_mx) { 741 if (cfg->fc_mx) {
@@ -808,7 +805,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
808 goto failure; 805 goto failure;
809 } else { 806 } else {
810 change_nexthops(fi) { 807 change_nexthops(fi) {
811 if ((err = fib_check_nh(cfg, fi, nh)) != 0) 808 if ((err = fib_check_nh(cfg, fi, nexthop_nh)) != 0)
812 goto failure; 809 goto failure;
813 } endfor_nexthops(fi) 810 } endfor_nexthops(fi)
814 } 811 }
@@ -843,11 +840,11 @@ link_it:
843 struct hlist_head *head; 840 struct hlist_head *head;
844 unsigned int hash; 841 unsigned int hash;
845 842
846 if (!nh->nh_dev) 843 if (!nexthop_nh->nh_dev)
847 continue; 844 continue;
848 hash = fib_devindex_hashfn(nh->nh_dev->ifindex); 845 hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
849 head = &fib_info_devhash[hash]; 846 head = &fib_info_devhash[hash];
850 hlist_add_head(&nh->nh_hash, head); 847 hlist_add_head(&nexthop_nh->nh_hash, head);
851 } endfor_nexthops(fi) 848 } endfor_nexthops(fi)
852 spin_unlock_bh(&fib_info_lock); 849 spin_unlock_bh(&fib_info_lock);
853 return fi; 850 return fi;
@@ -1080,21 +1077,21 @@ int fib_sync_down_dev(struct net_device *dev, int force)
1080 prev_fi = fi; 1077 prev_fi = fi;
1081 dead = 0; 1078 dead = 0;
1082 change_nexthops(fi) { 1079 change_nexthops(fi) {
1083 if (nh->nh_flags&RTNH_F_DEAD) 1080 if (nexthop_nh->nh_flags&RTNH_F_DEAD)
1084 dead++; 1081 dead++;
1085 else if (nh->nh_dev == dev && 1082 else if (nexthop_nh->nh_dev == dev &&
1086 nh->nh_scope != scope) { 1083 nexthop_nh->nh_scope != scope) {
1087 nh->nh_flags |= RTNH_F_DEAD; 1084 nexthop_nh->nh_flags |= RTNH_F_DEAD;
1088#ifdef CONFIG_IP_ROUTE_MULTIPATH 1085#ifdef CONFIG_IP_ROUTE_MULTIPATH
1089 spin_lock_bh(&fib_multipath_lock); 1086 spin_lock_bh(&fib_multipath_lock);
1090 fi->fib_power -= nh->nh_power; 1087 fi->fib_power -= nexthop_nh->nh_power;
1091 nh->nh_power = 0; 1088 nexthop_nh->nh_power = 0;
1092 spin_unlock_bh(&fib_multipath_lock); 1089 spin_unlock_bh(&fib_multipath_lock);
1093#endif 1090#endif
1094 dead++; 1091 dead++;
1095 } 1092 }
1096#ifdef CONFIG_IP_ROUTE_MULTIPATH 1093#ifdef CONFIG_IP_ROUTE_MULTIPATH
1097 if (force > 1 && nh->nh_dev == dev) { 1094 if (force > 1 && nexthop_nh->nh_dev == dev) {
1098 dead = fi->fib_nhs; 1095 dead = fi->fib_nhs;
1099 break; 1096 break;
1100 } 1097 }
@@ -1144,18 +1141,20 @@ int fib_sync_up(struct net_device *dev)
1144 prev_fi = fi; 1141 prev_fi = fi;
1145 alive = 0; 1142 alive = 0;
1146 change_nexthops(fi) { 1143 change_nexthops(fi) {
1147 if (!(nh->nh_flags&RTNH_F_DEAD)) { 1144 if (!(nexthop_nh->nh_flags&RTNH_F_DEAD)) {
1148 alive++; 1145 alive++;
1149 continue; 1146 continue;
1150 } 1147 }
1151 if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP)) 1148 if (nexthop_nh->nh_dev == NULL ||
1149 !(nexthop_nh->nh_dev->flags&IFF_UP))
1152 continue; 1150 continue;
1153 if (nh->nh_dev != dev || !__in_dev_get_rtnl(dev)) 1151 if (nexthop_nh->nh_dev != dev ||
1152 !__in_dev_get_rtnl(dev))
1154 continue; 1153 continue;
1155 alive++; 1154 alive++;
1156 spin_lock_bh(&fib_multipath_lock); 1155 spin_lock_bh(&fib_multipath_lock);
1157 nh->nh_power = 0; 1156 nexthop_nh->nh_power = 0;
1158 nh->nh_flags &= ~RTNH_F_DEAD; 1157 nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
1159 spin_unlock_bh(&fib_multipath_lock); 1158 spin_unlock_bh(&fib_multipath_lock);
1160 } endfor_nexthops(fi) 1159 } endfor_nexthops(fi)
1161 1160
@@ -1182,9 +1181,9 @@ void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
1182 if (fi->fib_power <= 0) { 1181 if (fi->fib_power <= 0) {
1183 int power = 0; 1182 int power = 0;
1184 change_nexthops(fi) { 1183 change_nexthops(fi) {
1185 if (!(nh->nh_flags&RTNH_F_DEAD)) { 1184 if (!(nexthop_nh->nh_flags&RTNH_F_DEAD)) {
1186 power += nh->nh_weight; 1185 power += nexthop_nh->nh_weight;
1187 nh->nh_power = nh->nh_weight; 1186 nexthop_nh->nh_power = nexthop_nh->nh_weight;
1188 } 1187 }
1189 } endfor_nexthops(fi); 1188 } endfor_nexthops(fi);
1190 fi->fib_power = power; 1189 fi->fib_power = power;
@@ -1204,9 +1203,10 @@ void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
1204 w = jiffies % fi->fib_power; 1203 w = jiffies % fi->fib_power;
1205 1204
1206 change_nexthops(fi) { 1205 change_nexthops(fi) {
1207 if (!(nh->nh_flags&RTNH_F_DEAD) && nh->nh_power) { 1206 if (!(nexthop_nh->nh_flags&RTNH_F_DEAD) &&
1208 if ((w -= nh->nh_power) <= 0) { 1207 nexthop_nh->nh_power) {
1209 nh->nh_power--; 1208 if ((w -= nexthop_nh->nh_power) <= 0) {
1209 nexthop_nh->nh_power--;
1210 fi->fib_power--; 1210 fi->fib_power--;
1211 res->nh_sel = nhsel; 1211 res->nh_sel = nhsel;
1212 spin_unlock_bh(&fib_multipath_lock); 1212 spin_unlock_bh(&fib_multipath_lock);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index fe11f60ce41..4b4c2bcd15d 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -114,7 +114,7 @@ struct icmp_bxm {
114/* An array of errno for error messages from dest unreach. */ 114/* An array of errno for error messages from dest unreach. */
115/* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */ 115/* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */
116 116
117struct icmp_err icmp_err_convert[] = { 117const struct icmp_err icmp_err_convert[] = {
118 { 118 {
119 .errno = ENETUNREACH, /* ICMP_NET_UNREACH */ 119 .errno = ENETUNREACH, /* ICMP_NET_UNREACH */
120 .fatal = 0, 120 .fatal = 0,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index a42f658e756..63bf298ca10 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1799,7 +1799,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
1799 iml->next = inet->mc_list; 1799 iml->next = inet->mc_list;
1800 iml->sflist = NULL; 1800 iml->sflist = NULL;
1801 iml->sfmode = MCAST_EXCLUDE; 1801 iml->sfmode = MCAST_EXCLUDE;
1802 inet->mc_list = iml; 1802 rcu_assign_pointer(inet->mc_list, iml);
1803 ip_mc_inc_group(in_dev, addr); 1803 ip_mc_inc_group(in_dev, addr);
1804 err = 0; 1804 err = 0;
1805done: 1805done:
@@ -1807,24 +1807,46 @@ done:
1807 return err; 1807 return err;
1808} 1808}
1809 1809
1810static void ip_sf_socklist_reclaim(struct rcu_head *rp)
1811{
1812 struct ip_sf_socklist *psf;
1813
1814 psf = container_of(rp, struct ip_sf_socklist, rcu);
1815 /* sk_omem_alloc should have been decreased by the caller*/
1816 kfree(psf);
1817}
1818
1810static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, 1819static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
1811 struct in_device *in_dev) 1820 struct in_device *in_dev)
1812{ 1821{
1822 struct ip_sf_socklist *psf = iml->sflist;
1813 int err; 1823 int err;
1814 1824
1815 if (iml->sflist == NULL) { 1825 if (psf == NULL) {
1816 /* any-source empty exclude case */ 1826 /* any-source empty exclude case */
1817 return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr, 1827 return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
1818 iml->sfmode, 0, NULL, 0); 1828 iml->sfmode, 0, NULL, 0);
1819 } 1829 }
1820 err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr, 1830 err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
1821 iml->sfmode, iml->sflist->sl_count, 1831 iml->sfmode, psf->sl_count, psf->sl_addr, 0);
1822 iml->sflist->sl_addr, 0); 1832 rcu_assign_pointer(iml->sflist, NULL);
1823 sock_kfree_s(sk, iml->sflist, IP_SFLSIZE(iml->sflist->sl_max)); 1833 /* decrease mem now to avoid the memleak warning */
1824 iml->sflist = NULL; 1834 atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
1835 call_rcu(&psf->rcu, ip_sf_socklist_reclaim);
1825 return err; 1836 return err;
1826} 1837}
1827 1838
1839
1840static void ip_mc_socklist_reclaim(struct rcu_head *rp)
1841{
1842 struct ip_mc_socklist *iml;
1843
1844 iml = container_of(rp, struct ip_mc_socklist, rcu);
1845 /* sk_omem_alloc should have been decreased by the caller*/
1846 kfree(iml);
1847}
1848
1849
1828/* 1850/*
1829 * Ask a socket to leave a group. 1851 * Ask a socket to leave a group.
1830 */ 1852 */
@@ -1854,12 +1876,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1854 1876
1855 (void) ip_mc_leave_src(sk, iml, in_dev); 1877 (void) ip_mc_leave_src(sk, iml, in_dev);
1856 1878
1857 *imlp = iml->next; 1879 rcu_assign_pointer(*imlp, iml->next);
1858 1880
1859 if (in_dev) 1881 if (in_dev)
1860 ip_mc_dec_group(in_dev, group); 1882 ip_mc_dec_group(in_dev, group);
1861 rtnl_unlock(); 1883 rtnl_unlock();
1862 sock_kfree_s(sk, iml, sizeof(*iml)); 1884 /* decrease mem now to avoid the memleak warning */
1885 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
1886 call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
1863 return 0; 1887 return 0;
1864 } 1888 }
1865 if (!in_dev) 1889 if (!in_dev)
@@ -1974,9 +1998,12 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
1974 if (psl) { 1998 if (psl) {
1975 for (i=0; i<psl->sl_count; i++) 1999 for (i=0; i<psl->sl_count; i++)
1976 newpsl->sl_addr[i] = psl->sl_addr[i]; 2000 newpsl->sl_addr[i] = psl->sl_addr[i];
1977 sock_kfree_s(sk, psl, IP_SFLSIZE(psl->sl_max)); 2001 /* decrease mem now to avoid the memleak warning */
2002 atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
2003 call_rcu(&psl->rcu, ip_sf_socklist_reclaim);
1978 } 2004 }
1979 pmc->sflist = psl = newpsl; 2005 rcu_assign_pointer(pmc->sflist, newpsl);
2006 psl = newpsl;
1980 } 2007 }
1981 rv = 1; /* > 0 for insert logic below if sl_count is 0 */ 2008 rv = 1; /* > 0 for insert logic below if sl_count is 0 */
1982 for (i=0; i<psl->sl_count; i++) { 2009 for (i=0; i<psl->sl_count; i++) {
@@ -2072,11 +2099,13 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
2072 if (psl) { 2099 if (psl) {
2073 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, 2100 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
2074 psl->sl_count, psl->sl_addr, 0); 2101 psl->sl_count, psl->sl_addr, 0);
2075 sock_kfree_s(sk, psl, IP_SFLSIZE(psl->sl_max)); 2102 /* decrease mem now to avoid the memleak warning */
2103 atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
2104 call_rcu(&psl->rcu, ip_sf_socklist_reclaim);
2076 } else 2105 } else
2077 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, 2106 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
2078 0, NULL, 0); 2107 0, NULL, 0);
2079 pmc->sflist = newpsl; 2108 rcu_assign_pointer(pmc->sflist, newpsl);
2080 pmc->sfmode = msf->imsf_fmode; 2109 pmc->sfmode = msf->imsf_fmode;
2081 err = 0; 2110 err = 0;
2082done: 2111done:
@@ -2209,30 +2238,40 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
2209 struct ip_mc_socklist *pmc; 2238 struct ip_mc_socklist *pmc;
2210 struct ip_sf_socklist *psl; 2239 struct ip_sf_socklist *psl;
2211 int i; 2240 int i;
2241 int ret;
2212 2242
2243 ret = 1;
2213 if (!ipv4_is_multicast(loc_addr)) 2244 if (!ipv4_is_multicast(loc_addr))
2214 return 1; 2245 goto out;
2215 2246
2216 for (pmc=inet->mc_list; pmc; pmc=pmc->next) { 2247 rcu_read_lock();
2248 for (pmc=rcu_dereference(inet->mc_list); pmc; pmc=rcu_dereference(pmc->next)) {
2217 if (pmc->multi.imr_multiaddr.s_addr == loc_addr && 2249 if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
2218 pmc->multi.imr_ifindex == dif) 2250 pmc->multi.imr_ifindex == dif)
2219 break; 2251 break;
2220 } 2252 }
2253 ret = inet->mc_all;
2221 if (!pmc) 2254 if (!pmc)
2222 return inet->mc_all; 2255 goto unlock;
2223 psl = pmc->sflist; 2256 psl = pmc->sflist;
2257 ret = (pmc->sfmode == MCAST_EXCLUDE);
2224 if (!psl) 2258 if (!psl)
2225 return pmc->sfmode == MCAST_EXCLUDE; 2259 goto unlock;
2226 2260
2227 for (i=0; i<psl->sl_count; i++) { 2261 for (i=0; i<psl->sl_count; i++) {
2228 if (psl->sl_addr[i] == rmt_addr) 2262 if (psl->sl_addr[i] == rmt_addr)
2229 break; 2263 break;
2230 } 2264 }
2265 ret = 0;
2231 if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count) 2266 if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
2232 return 0; 2267 goto unlock;
2233 if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count) 2268 if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
2234 return 0; 2269 goto unlock;
2235 return 1; 2270 ret = 1;
2271unlock:
2272 rcu_read_unlock();
2273out:
2274 return ret;
2236} 2275}
2237 2276
2238/* 2277/*
@@ -2251,7 +2290,7 @@ void ip_mc_drop_socket(struct sock *sk)
2251 rtnl_lock(); 2290 rtnl_lock();
2252 while ((iml = inet->mc_list) != NULL) { 2291 while ((iml = inet->mc_list) != NULL) {
2253 struct in_device *in_dev; 2292 struct in_device *in_dev;
2254 inet->mc_list = iml->next; 2293 rcu_assign_pointer(inet->mc_list, iml->next);
2255 2294
2256 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); 2295 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
2257 (void) ip_mc_leave_src(sk, iml, in_dev); 2296 (void) ip_mc_leave_src(sk, iml, in_dev);
@@ -2259,7 +2298,9 @@ void ip_mc_drop_socket(struct sock *sk)
2259 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); 2298 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
2260 in_dev_put(in_dev); 2299 in_dev_put(in_dev);
2261 } 2300 }
2262 sock_kfree_s(sk, iml, sizeof(*iml)); 2301 /* decrease mem now to avoid the memleak warning */
2302 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
2303 call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
2263 } 2304 }
2264 rtnl_unlock(); 2305 rtnl_unlock();
2265} 2306}
@@ -2603,7 +2644,7 @@ static const struct file_operations igmp_mcf_seq_fops = {
2603 .release = seq_release_net, 2644 .release = seq_release_net,
2604}; 2645};
2605 2646
2606static int igmp_net_init(struct net *net) 2647static int __net_init igmp_net_init(struct net *net)
2607{ 2648{
2608 struct proc_dir_entry *pde; 2649 struct proc_dir_entry *pde;
2609 2650
@@ -2621,7 +2662,7 @@ out_igmp:
2621 return -ENOMEM; 2662 return -ENOMEM;
2622} 2663}
2623 2664
2624static void igmp_net_exit(struct net *net) 2665static void __net_exit igmp_net_exit(struct net *net)
2625{ 2666{
2626 proc_net_remove(net, "mcfilter"); 2667 proc_net_remove(net, "mcfilter");
2627 proc_net_remove(net, "igmp"); 2668 proc_net_remove(net, "igmp");
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index ee16475f8fc..8da6429269d 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -529,6 +529,8 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
529 syn_ack_recalc(req, thresh, max_retries, 529 syn_ack_recalc(req, thresh, max_retries,
530 queue->rskq_defer_accept, 530 queue->rskq_defer_accept,
531 &expire, &resend); 531 &expire, &resend);
532 if (req->rsk_ops->syn_ack_timeout)
533 req->rsk_ops->syn_ack_timeout(parent, req);
532 if (!expire && 534 if (!expire &&
533 (!resend || 535 (!resend ||
534 !req->rsk_ops->rtx_syn_ack(parent, req, NULL) || 536 !req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 86964b353c3..b59430bc041 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -32,6 +32,8 @@
32#include <linux/netdevice.h> 32#include <linux/netdevice.h>
33#include <linux/jhash.h> 33#include <linux/jhash.h>
34#include <linux/random.h> 34#include <linux/random.h>
35#include <net/route.h>
36#include <net/dst.h>
35#include <net/sock.h> 37#include <net/sock.h>
36#include <net/ip.h> 38#include <net/ip.h>
37#include <net/icmp.h> 39#include <net/icmp.h>
@@ -205,11 +207,34 @@ static void ip_expire(unsigned long arg)
205 if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) { 207 if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
206 struct sk_buff *head = qp->q.fragments; 208 struct sk_buff *head = qp->q.fragments;
207 209
208 /* Send an ICMP "Fragment Reassembly Timeout" message. */
209 rcu_read_lock(); 210 rcu_read_lock();
210 head->dev = dev_get_by_index_rcu(net, qp->iif); 211 head->dev = dev_get_by_index_rcu(net, qp->iif);
211 if (head->dev) 212 if (!head->dev)
212 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); 213 goto out_rcu_unlock;
214
215 /*
216 * Only search router table for the head fragment,
217 * when defraging timeout at PRE_ROUTING HOOK.
218 */
219 if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) {
220 const struct iphdr *iph = ip_hdr(head);
221 int err = ip_route_input(head, iph->daddr, iph->saddr,
222 iph->tos, head->dev);
223 if (unlikely(err))
224 goto out_rcu_unlock;
225
226 /*
227 * Only an end host needs to send an ICMP
228 * "Fragment Reassembly Timeout" message, per RFC792.
229 */
230 if (skb_rtable(head)->rt_type != RTN_LOCAL)
231 goto out_rcu_unlock;
232
233 }
234
235 /* Send an ICMP "Fragment Reassembly Timeout" message. */
236 icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
237out_rcu_unlock:
213 rcu_read_unlock(); 238 rcu_read_unlock();
214 } 239 }
215out: 240out:
@@ -646,7 +671,7 @@ static struct ctl_table ip4_frags_ctl_table[] = {
646 { } 671 { }
647}; 672};
648 673
649static int ip4_frags_ns_ctl_register(struct net *net) 674static int __net_init ip4_frags_ns_ctl_register(struct net *net)
650{ 675{
651 struct ctl_table *table; 676 struct ctl_table *table;
652 struct ctl_table_header *hdr; 677 struct ctl_table_header *hdr;
@@ -676,7 +701,7 @@ err_alloc:
676 return -ENOMEM; 701 return -ENOMEM;
677} 702}
678 703
679static void ip4_frags_ns_ctl_unregister(struct net *net) 704static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
680{ 705{
681 struct ctl_table *table; 706 struct ctl_table *table;
682 707
@@ -704,7 +729,7 @@ static inline void ip4_frags_ctl_register(void)
704} 729}
705#endif 730#endif
706 731
707static int ipv4_frags_init_net(struct net *net) 732static int __net_init ipv4_frags_init_net(struct net *net)
708{ 733{
709 /* 734 /*
710 * Fragment cache limits. We will commit 256K at one time. Should we 735 * Fragment cache limits. We will commit 256K at one time. Should we
@@ -726,7 +751,7 @@ static int ipv4_frags_init_net(struct net *net)
726 return ip4_frags_ns_ctl_register(net); 751 return ip4_frags_ns_ctl_register(net);
727} 752}
728 753
729static void ipv4_frags_exit_net(struct net *net) 754static void __net_exit ipv4_frags_exit_net(struct net *net)
730{ 755{
731 ip4_frags_ns_ctl_unregister(net); 756 ip4_frags_ns_ctl_unregister(net);
732 inet_frags_exit_net(&net->ipv4.frags, &ip4_frags); 757 inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index f36ce156cac..c0c5274d027 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -793,7 +793,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
793 } 793 }
794 794
795 if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) { 795 if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
796 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); 796 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
797 ip_rt_put(rt); 797 ip_rt_put(rt);
798 goto tx_error; 798 goto tx_error;
799 } 799 }
@@ -1307,7 +1307,7 @@ static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
1307 } 1307 }
1308} 1308}
1309 1309
1310static int ipgre_init_net(struct net *net) 1310static int __net_init ipgre_init_net(struct net *net)
1311{ 1311{
1312 struct ipgre_net *ign = net_generic(net, ipgre_net_id); 1312 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1313 int err; 1313 int err;
@@ -1334,7 +1334,7 @@ err_alloc_dev:
1334 return err; 1334 return err;
1335} 1335}
1336 1336
1337static void ipgre_exit_net(struct net *net) 1337static void __net_exit ipgre_exit_net(struct net *net)
1338{ 1338{
1339 struct ipgre_net *ign; 1339 struct ipgre_net *ign;
1340 LIST_HEAD(list); 1340 LIST_HEAD(list);
@@ -1665,14 +1665,15 @@ static int __init ipgre_init(void)
1665 1665
1666 printk(KERN_INFO "GRE over IPv4 tunneling driver\n"); 1666 printk(KERN_INFO "GRE over IPv4 tunneling driver\n");
1667 1667
1668 if (inet_add_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) {
1669 printk(KERN_INFO "ipgre init: can't add protocol\n");
1670 return -EAGAIN;
1671 }
1672
1673 err = register_pernet_device(&ipgre_net_ops); 1668 err = register_pernet_device(&ipgre_net_ops);
1674 if (err < 0) 1669 if (err < 0)
1675 goto gen_device_failed; 1670 return err;
1671
1672 err = inet_add_protocol(&ipgre_protocol, IPPROTO_GRE);
1673 if (err < 0) {
1674 printk(KERN_INFO "ipgre init: can't add protocol\n");
1675 goto add_proto_failed;
1676 }
1676 1677
1677 err = rtnl_link_register(&ipgre_link_ops); 1678 err = rtnl_link_register(&ipgre_link_ops);
1678 if (err < 0) 1679 if (err < 0)
@@ -1688,9 +1689,9 @@ out:
1688tap_ops_failed: 1689tap_ops_failed:
1689 rtnl_link_unregister(&ipgre_link_ops); 1690 rtnl_link_unregister(&ipgre_link_ops);
1690rtnl_link_failed: 1691rtnl_link_failed:
1691 unregister_pernet_device(&ipgre_net_ops);
1692gen_device_failed:
1693 inet_del_protocol(&ipgre_protocol, IPPROTO_GRE); 1692 inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
1693add_proto_failed:
1694 unregister_pernet_device(&ipgre_net_ops);
1694 goto out; 1695 goto out;
1695} 1696}
1696 1697
@@ -1698,9 +1699,9 @@ static void __exit ipgre_fini(void)
1698{ 1699{
1699 rtnl_link_unregister(&ipgre_tap_ops); 1700 rtnl_link_unregister(&ipgre_tap_ops);
1700 rtnl_link_unregister(&ipgre_link_ops); 1701 rtnl_link_unregister(&ipgre_link_ops);
1701 unregister_pernet_device(&ipgre_net_ops);
1702 if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) 1702 if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
1703 printk(KERN_INFO "ipgre close: can't remove protocol\n"); 1703 printk(KERN_INFO "ipgre close: can't remove protocol\n");
1704 unregister_pernet_device(&ipgre_net_ops);
1704} 1705}
1705 1706
1706module_init(ipgre_init); 1707module_init(ipgre_init);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index cafad9baff0..644dc43a55d 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -451,7 +451,8 @@ static int do_ip_setsockopt(struct sock *sk, int level,
451 (1<<IP_TTL) | (1<<IP_HDRINCL) | 451 (1<<IP_TTL) | (1<<IP_HDRINCL) |
452 (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) | 452 (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
453 (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | 453 (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
454 (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT))) || 454 (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
455 (1<<IP_MINTTL))) ||
455 optname == IP_MULTICAST_TTL || 456 optname == IP_MULTICAST_TTL ||
456 optname == IP_MULTICAST_ALL || 457 optname == IP_MULTICAST_ALL ||
457 optname == IP_MULTICAST_LOOP || 458 optname == IP_MULTICAST_LOOP ||
@@ -936,6 +937,14 @@ mc_msf_out:
936 inet->transparent = !!val; 937 inet->transparent = !!val;
937 break; 938 break;
938 939
940 case IP_MINTTL:
941 if (optlen < 1)
942 goto e_inval;
943 if (val < 0 || val > 255)
944 goto e_inval;
945 inet->min_ttl = val;
946 break;
947
939 default: 948 default:
940 err = -ENOPROTOOPT; 949 err = -ENOPROTOOPT;
941 break; 950 break;
@@ -1198,6 +1207,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1198 case IP_TRANSPARENT: 1207 case IP_TRANSPARENT:
1199 val = inet->transparent; 1208 val = inet->transparent;
1200 break; 1209 break;
1210 case IP_MINTTL:
1211 val = inet->min_ttl;
1212 break;
1201 default: 1213 default:
1202 release_sock(sk); 1214 release_sock(sk);
1203 return -ENOPROTOOPT; 1215 return -ENOPROTOOPT;
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 544ce0876f1..629067571f0 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -25,6 +25,7 @@
25 25
26static void ipcomp4_err(struct sk_buff *skb, u32 info) 26static void ipcomp4_err(struct sk_buff *skb, u32 info)
27{ 27{
28 struct net *net = dev_net(skb->dev);
28 __be32 spi; 29 __be32 spi;
29 struct iphdr *iph = (struct iphdr *)skb->data; 30 struct iphdr *iph = (struct iphdr *)skb->data;
30 struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2)); 31 struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
@@ -35,7 +36,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
35 return; 36 return;
36 37
37 spi = htonl(ntohs(ipch->cpi)); 38 spi = htonl(ntohs(ipch->cpi));
38 x = xfrm_state_lookup(&init_net, (xfrm_address_t *)&iph->daddr, 39 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr,
39 spi, IPPROTO_COMP, AF_INET); 40 spi, IPPROTO_COMP, AF_INET);
40 if (!x) 41 if (!x)
41 return; 42 return;
@@ -47,9 +48,10 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
47/* We always hold one tunnel user reference to indicate a tunnel */ 48/* We always hold one tunnel user reference to indicate a tunnel */
48static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x) 49static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
49{ 50{
51 struct net *net = xs_net(x);
50 struct xfrm_state *t; 52 struct xfrm_state *t;
51 53
52 t = xfrm_state_alloc(&init_net); 54 t = xfrm_state_alloc(net);
53 if (t == NULL) 55 if (t == NULL)
54 goto out; 56 goto out;
55 57
@@ -61,6 +63,7 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
61 t->props.mode = x->props.mode; 63 t->props.mode = x->props.mode;
62 t->props.saddr.a4 = x->props.saddr.a4; 64 t->props.saddr.a4 = x->props.saddr.a4;
63 t->props.flags = x->props.flags; 65 t->props.flags = x->props.flags;
66 memcpy(&t->mark, &x->mark, sizeof(t->mark));
64 67
65 if (xfrm_init_state(t)) 68 if (xfrm_init_state(t))
66 goto error; 69 goto error;
@@ -82,10 +85,12 @@ error:
82 */ 85 */
83static int ipcomp_tunnel_attach(struct xfrm_state *x) 86static int ipcomp_tunnel_attach(struct xfrm_state *x)
84{ 87{
88 struct net *net = xs_net(x);
85 int err = 0; 89 int err = 0;
86 struct xfrm_state *t; 90 struct xfrm_state *t;
91 u32 mark = x->mark.v & x->mark.m;
87 92
88 t = xfrm_state_lookup(&init_net, (xfrm_address_t *)&x->id.daddr.a4, 93 t = xfrm_state_lookup(net, mark, (xfrm_address_t *)&x->id.daddr.a4,
89 x->props.saddr.a4, IPPROTO_IPIP, AF_INET); 94 x->props.saddr.a4, IPPROTO_IPIP, AF_INET);
90 if (!t) { 95 if (!t) {
91 t = ipcomp_tunnel_create(x); 96 t = ipcomp_tunnel_create(x);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index eda04fed337..2f302d3ac9a 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -130,7 +130,6 @@ struct ipip_net {
130 struct net_device *fb_tunnel_dev; 130 struct net_device *fb_tunnel_dev;
131}; 131};
132 132
133static void ipip_fb_tunnel_init(struct net_device *dev);
134static void ipip_tunnel_init(struct net_device *dev); 133static void ipip_tunnel_init(struct net_device *dev);
135static void ipip_tunnel_setup(struct net_device *dev); 134static void ipip_tunnel_setup(struct net_device *dev);
136 135
@@ -730,7 +729,7 @@ static void ipip_tunnel_init(struct net_device *dev)
730 ipip_tunnel_bind_dev(dev); 729 ipip_tunnel_bind_dev(dev);
731} 730}
732 731
733static void ipip_fb_tunnel_init(struct net_device *dev) 732static void __net_init ipip_fb_tunnel_init(struct net_device *dev)
734{ 733{
735 struct ip_tunnel *tunnel = netdev_priv(dev); 734 struct ip_tunnel *tunnel = netdev_priv(dev);
736 struct iphdr *iph = &tunnel->parms.iph; 735 struct iphdr *iph = &tunnel->parms.iph;
@@ -773,7 +772,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
773 } 772 }
774} 773}
775 774
776static int ipip_init_net(struct net *net) 775static int __net_init ipip_init_net(struct net *net)
777{ 776{
778 struct ipip_net *ipn = net_generic(net, ipip_net_id); 777 struct ipip_net *ipn = net_generic(net, ipip_net_id);
779 int err; 778 int err;
@@ -806,7 +805,7 @@ err_alloc_dev:
806 return err; 805 return err;
807} 806}
808 807
809static void ipip_exit_net(struct net *net) 808static void __net_exit ipip_exit_net(struct net *net)
810{ 809{
811 struct ipip_net *ipn = net_generic(net, ipip_net_id); 810 struct ipip_net *ipn = net_generic(net, ipip_net_id);
812 LIST_HEAD(list); 811 LIST_HEAD(list);
@@ -831,15 +830,14 @@ static int __init ipip_init(void)
831 830
832 printk(banner); 831 printk(banner);
833 832
834 if (xfrm4_tunnel_register(&ipip_handler, AF_INET)) { 833 err = register_pernet_device(&ipip_net_ops);
834 if (err < 0)
835 return err;
836 err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
837 if (err < 0) {
838 unregister_pernet_device(&ipip_net_ops);
835 printk(KERN_INFO "ipip init: can't register tunnel\n"); 839 printk(KERN_INFO "ipip init: can't register tunnel\n");
836 return -EAGAIN;
837 } 840 }
838
839 err = register_pernet_device(&ipip_net_ops);
840 if (err)
841 xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
842
843 return err; 841 return err;
844} 842}
845 843
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 54596f73eff..8582e12e4a6 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1163,9 +1163,6 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v
1163 int ct; 1163 int ct;
1164 LIST_HEAD(list); 1164 LIST_HEAD(list);
1165 1165
1166 if (!net_eq(dev_net(dev), net))
1167 return NOTIFY_DONE;
1168
1169 if (event != NETDEV_UNREGISTER) 1166 if (event != NETDEV_UNREGISTER)
1170 return NOTIFY_DONE; 1167 return NOTIFY_DONE;
1171 v = &net->ipv4.vif_table[0]; 1168 v = &net->ipv4.vif_table[0];
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 90203e1b918..57098dcda29 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -27,6 +27,7 @@
27 27
28#include <linux/netfilter/x_tables.h> 28#include <linux/netfilter/x_tables.h>
29#include <linux/netfilter_arp/arp_tables.h> 29#include <linux/netfilter_arp/arp_tables.h>
30#include "../../netfilter/xt_repldata.h"
30 31
31MODULE_LICENSE("GPL"); 32MODULE_LICENSE("GPL");
32MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); 33MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
@@ -58,6 +59,12 @@ do { \
58#define ARP_NF_ASSERT(x) 59#define ARP_NF_ASSERT(x)
59#endif 60#endif
60 61
62void *arpt_alloc_initial_table(const struct xt_table *info)
63{
64 return xt_alloc_initial_table(arpt, ARPT);
65}
66EXPORT_SYMBOL_GPL(arpt_alloc_initial_table);
67
61static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap, 68static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
62 const char *hdr_addr, int len) 69 const char *hdr_addr, int len)
63{ 70{
@@ -226,7 +233,14 @@ arpt_error(struct sk_buff *skb, const struct xt_target_param *par)
226 return NF_DROP; 233 return NF_DROP;
227} 234}
228 235
229static inline struct arpt_entry *get_entry(void *base, unsigned int offset) 236static inline const struct arpt_entry_target *
237arpt_get_target_c(const struct arpt_entry *e)
238{
239 return arpt_get_target((struct arpt_entry *)e);
240}
241
242static inline struct arpt_entry *
243get_entry(const void *base, unsigned int offset)
230{ 244{
231 return (struct arpt_entry *)(base + offset); 245 return (struct arpt_entry *)(base + offset);
232} 246}
@@ -273,7 +287,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
273 287
274 arp = arp_hdr(skb); 288 arp = arp_hdr(skb);
275 do { 289 do {
276 struct arpt_entry_target *t; 290 const struct arpt_entry_target *t;
277 int hdr_len; 291 int hdr_len;
278 292
279 if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) { 293 if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
@@ -285,7 +299,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
285 (2 * skb->dev->addr_len); 299 (2 * skb->dev->addr_len);
286 ADD_COUNTER(e->counters, hdr_len, 1); 300 ADD_COUNTER(e->counters, hdr_len, 1);
287 301
288 t = arpt_get_target(e); 302 t = arpt_get_target_c(e);
289 303
290 /* Standard target? */ 304 /* Standard target? */
291 if (!t->u.kernel.target->target) { 305 if (!t->u.kernel.target->target) {
@@ -351,7 +365,7 @@ static inline bool unconditional(const struct arpt_arp *arp)
351/* Figures out from what hook each rule can be called: returns 0 if 365/* Figures out from what hook each rule can be called: returns 0 if
352 * there are loops. Puts hook bitmask in comefrom. 366 * there are loops. Puts hook bitmask in comefrom.
353 */ 367 */
354static int mark_source_chains(struct xt_table_info *newinfo, 368static int mark_source_chains(const struct xt_table_info *newinfo,
355 unsigned int valid_hooks, void *entry0) 369 unsigned int valid_hooks, void *entry0)
356{ 370{
357 unsigned int hook; 371 unsigned int hook;
@@ -372,7 +386,7 @@ static int mark_source_chains(struct xt_table_info *newinfo,
372 386
373 for (;;) { 387 for (;;) {
374 const struct arpt_standard_target *t 388 const struct arpt_standard_target *t
375 = (void *)arpt_get_target(e); 389 = (void *)arpt_get_target_c(e);
376 int visited = e->comefrom & (1 << hook); 390 int visited = e->comefrom & (1 << hook);
377 391
378 if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) { 392 if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) {
@@ -456,7 +470,7 @@ static int mark_source_chains(struct xt_table_info *newinfo,
456 return 1; 470 return 1;
457} 471}
458 472
459static inline int check_entry(struct arpt_entry *e, const char *name) 473static inline int check_entry(const struct arpt_entry *e, const char *name)
460{ 474{
461 const struct arpt_entry_target *t; 475 const struct arpt_entry_target *t;
462 476
@@ -468,7 +482,7 @@ static inline int check_entry(struct arpt_entry *e, const char *name)
468 if (e->target_offset + sizeof(struct arpt_entry_target) > e->next_offset) 482 if (e->target_offset + sizeof(struct arpt_entry_target) > e->next_offset)
469 return -EINVAL; 483 return -EINVAL;
470 484
471 t = arpt_get_target(e); 485 t = arpt_get_target_c(e);
472 if (e->target_offset + t->u.target_size > e->next_offset) 486 if (e->target_offset + t->u.target_size > e->next_offset)
473 return -EINVAL; 487 return -EINVAL;
474 488
@@ -498,8 +512,7 @@ static inline int check_target(struct arpt_entry *e, const char *name)
498} 512}
499 513
500static inline int 514static inline int
501find_check_entry(struct arpt_entry *e, const char *name, unsigned int size, 515find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
502 unsigned int *i)
503{ 516{
504 struct arpt_entry_target *t; 517 struct arpt_entry_target *t;
505 struct xt_target *target; 518 struct xt_target *target;
@@ -524,8 +537,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
524 ret = check_target(e, name); 537 ret = check_target(e, name);
525 if (ret) 538 if (ret)
526 goto err; 539 goto err;
527
528 (*i)++;
529 return 0; 540 return 0;
530err: 541err:
531 module_put(t->u.kernel.target->me); 542 module_put(t->u.kernel.target->me);
@@ -533,14 +544,14 @@ out:
533 return ret; 544 return ret;
534} 545}
535 546
536static bool check_underflow(struct arpt_entry *e) 547static bool check_underflow(const struct arpt_entry *e)
537{ 548{
538 const struct arpt_entry_target *t; 549 const struct arpt_entry_target *t;
539 unsigned int verdict; 550 unsigned int verdict;
540 551
541 if (!unconditional(&e->arp)) 552 if (!unconditional(&e->arp))
542 return false; 553 return false;
543 t = arpt_get_target(e); 554 t = arpt_get_target_c(e);
544 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) 555 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
545 return false; 556 return false;
546 verdict = ((struct arpt_standard_target *)t)->verdict; 557 verdict = ((struct arpt_standard_target *)t)->verdict;
@@ -550,12 +561,11 @@ static bool check_underflow(struct arpt_entry *e)
550 561
551static inline int check_entry_size_and_hooks(struct arpt_entry *e, 562static inline int check_entry_size_and_hooks(struct arpt_entry *e,
552 struct xt_table_info *newinfo, 563 struct xt_table_info *newinfo,
553 unsigned char *base, 564 const unsigned char *base,
554 unsigned char *limit, 565 const unsigned char *limit,
555 const unsigned int *hook_entries, 566 const unsigned int *hook_entries,
556 const unsigned int *underflows, 567 const unsigned int *underflows,
557 unsigned int valid_hooks, 568 unsigned int valid_hooks)
558 unsigned int *i)
559{ 569{
560 unsigned int h; 570 unsigned int h;
561 571
@@ -592,19 +602,14 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
592 /* Clear counters and comefrom */ 602 /* Clear counters and comefrom */
593 e->counters = ((struct xt_counters) { 0, 0 }); 603 e->counters = ((struct xt_counters) { 0, 0 });
594 e->comefrom = 0; 604 e->comefrom = 0;
595
596 (*i)++;
597 return 0; 605 return 0;
598} 606}
599 607
600static inline int cleanup_entry(struct arpt_entry *e, unsigned int *i) 608static inline void cleanup_entry(struct arpt_entry *e)
601{ 609{
602 struct xt_tgdtor_param par; 610 struct xt_tgdtor_param par;
603 struct arpt_entry_target *t; 611 struct arpt_entry_target *t;
604 612
605 if (i && (*i)-- == 0)
606 return 1;
607
608 t = arpt_get_target(e); 613 t = arpt_get_target(e);
609 par.target = t->u.kernel.target; 614 par.target = t->u.kernel.target;
610 par.targinfo = t->data; 615 par.targinfo = t->data;
@@ -612,26 +617,20 @@ static inline int cleanup_entry(struct arpt_entry *e, unsigned int *i)
612 if (par.target->destroy != NULL) 617 if (par.target->destroy != NULL)
613 par.target->destroy(&par); 618 par.target->destroy(&par);
614 module_put(par.target->me); 619 module_put(par.target->me);
615 return 0;
616} 620}
617 621
618/* Checks and translates the user-supplied table segment (held in 622/* Checks and translates the user-supplied table segment (held in
619 * newinfo). 623 * newinfo).
620 */ 624 */
621static int translate_table(const char *name, 625static int translate_table(struct xt_table_info *newinfo, void *entry0,
622 unsigned int valid_hooks, 626 const struct arpt_replace *repl)
623 struct xt_table_info *newinfo,
624 void *entry0,
625 unsigned int size,
626 unsigned int number,
627 const unsigned int *hook_entries,
628 const unsigned int *underflows)
629{ 627{
628 struct arpt_entry *iter;
630 unsigned int i; 629 unsigned int i;
631 int ret; 630 int ret = 0;
632 631
633 newinfo->size = size; 632 newinfo->size = repl->size;
634 newinfo->number = number; 633 newinfo->number = repl->num_entries;
635 634
636 /* Init all hooks to impossible value. */ 635 /* Init all hooks to impossible value. */
637 for (i = 0; i < NF_ARP_NUMHOOKS; i++) { 636 for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
@@ -643,52 +642,61 @@ static int translate_table(const char *name,
643 i = 0; 642 i = 0;
644 643
645 /* Walk through entries, checking offsets. */ 644 /* Walk through entries, checking offsets. */
646 ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size, 645 xt_entry_foreach(iter, entry0, newinfo->size) {
647 check_entry_size_and_hooks, 646 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
648 newinfo, 647 entry0 + repl->size, repl->hook_entry, repl->underflow,
649 entry0, 648 repl->valid_hooks);
650 entry0 + size, 649 if (ret != 0)
651 hook_entries, underflows, valid_hooks, &i); 650 break;
651 ++i;
652 }
652 duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret); 653 duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
653 if (ret != 0) 654 if (ret != 0)
654 return ret; 655 return ret;
655 656
656 if (i != number) { 657 if (i != repl->num_entries) {
657 duprintf("translate_table: %u not %u entries\n", 658 duprintf("translate_table: %u not %u entries\n",
658 i, number); 659 i, repl->num_entries);
659 return -EINVAL; 660 return -EINVAL;
660 } 661 }
661 662
662 /* Check hooks all assigned */ 663 /* Check hooks all assigned */
663 for (i = 0; i < NF_ARP_NUMHOOKS; i++) { 664 for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
664 /* Only hooks which are valid */ 665 /* Only hooks which are valid */
665 if (!(valid_hooks & (1 << i))) 666 if (!(repl->valid_hooks & (1 << i)))
666 continue; 667 continue;
667 if (newinfo->hook_entry[i] == 0xFFFFFFFF) { 668 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
668 duprintf("Invalid hook entry %u %u\n", 669 duprintf("Invalid hook entry %u %u\n",
669 i, hook_entries[i]); 670 i, repl->hook_entry[i]);
670 return -EINVAL; 671 return -EINVAL;
671 } 672 }
672 if (newinfo->underflow[i] == 0xFFFFFFFF) { 673 if (newinfo->underflow[i] == 0xFFFFFFFF) {
673 duprintf("Invalid underflow %u %u\n", 674 duprintf("Invalid underflow %u %u\n",
674 i, underflows[i]); 675 i, repl->underflow[i]);
675 return -EINVAL; 676 return -EINVAL;
676 } 677 }
677 } 678 }
678 679
679 if (!mark_source_chains(newinfo, valid_hooks, entry0)) { 680 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
680 duprintf("Looping hook\n"); 681 duprintf("Looping hook\n");
681 return -ELOOP; 682 return -ELOOP;
682 } 683 }
683 684
684 /* Finally, each sanity check must pass */ 685 /* Finally, each sanity check must pass */
685 i = 0; 686 i = 0;
686 ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size, 687 xt_entry_foreach(iter, entry0, newinfo->size) {
687 find_check_entry, name, size, &i); 688 ret = find_check_entry(iter, repl->name, repl->size);
689 if (ret != 0)
690 break;
691 ++i;
692 }
688 693
689 if (ret != 0) { 694 if (ret != 0) {
690 ARPT_ENTRY_ITERATE(entry0, newinfo->size, 695 xt_entry_foreach(iter, entry0, newinfo->size) {
691 cleanup_entry, &i); 696 if (i-- == 0)
697 break;
698 cleanup_entry(iter);
699 }
692 return ret; 700 return ret;
693 } 701 }
694 702
@@ -701,30 +709,10 @@ static int translate_table(const char *name,
701 return ret; 709 return ret;
702} 710}
703 711
704/* Gets counters. */
705static inline int add_entry_to_counter(const struct arpt_entry *e,
706 struct xt_counters total[],
707 unsigned int *i)
708{
709 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
710
711 (*i)++;
712 return 0;
713}
714
715static inline int set_entry_to_counter(const struct arpt_entry *e,
716 struct xt_counters total[],
717 unsigned int *i)
718{
719 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
720
721 (*i)++;
722 return 0;
723}
724
725static void get_counters(const struct xt_table_info *t, 712static void get_counters(const struct xt_table_info *t,
726 struct xt_counters counters[]) 713 struct xt_counters counters[])
727{ 714{
715 struct arpt_entry *iter;
728 unsigned int cpu; 716 unsigned int cpu;
729 unsigned int i; 717 unsigned int i;
730 unsigned int curcpu; 718 unsigned int curcpu;
@@ -740,32 +728,32 @@ static void get_counters(const struct xt_table_info *t,
740 curcpu = smp_processor_id(); 728 curcpu = smp_processor_id();
741 729
742 i = 0; 730 i = 0;
743 ARPT_ENTRY_ITERATE(t->entries[curcpu], 731 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
744 t->size, 732 SET_COUNTER(counters[i], iter->counters.bcnt,
745 set_entry_to_counter, 733 iter->counters.pcnt);
746 counters, 734 ++i;
747 &i); 735 }
748 736
749 for_each_possible_cpu(cpu) { 737 for_each_possible_cpu(cpu) {
750 if (cpu == curcpu) 738 if (cpu == curcpu)
751 continue; 739 continue;
752 i = 0; 740 i = 0;
753 xt_info_wrlock(cpu); 741 xt_info_wrlock(cpu);
754 ARPT_ENTRY_ITERATE(t->entries[cpu], 742 xt_entry_foreach(iter, t->entries[cpu], t->size) {
755 t->size, 743 ADD_COUNTER(counters[i], iter->counters.bcnt,
756 add_entry_to_counter, 744 iter->counters.pcnt);
757 counters, 745 ++i;
758 &i); 746 }
759 xt_info_wrunlock(cpu); 747 xt_info_wrunlock(cpu);
760 } 748 }
761 local_bh_enable(); 749 local_bh_enable();
762} 750}
763 751
764static struct xt_counters *alloc_counters(struct xt_table *table) 752static struct xt_counters *alloc_counters(const struct xt_table *table)
765{ 753{
766 unsigned int countersize; 754 unsigned int countersize;
767 struct xt_counters *counters; 755 struct xt_counters *counters;
768 struct xt_table_info *private = table->private; 756 const struct xt_table_info *private = table->private;
769 757
770 /* We need atomic snapshot of counters: rest doesn't change 758 /* We need atomic snapshot of counters: rest doesn't change
771 * (other than comefrom, which userspace doesn't care 759 * (other than comefrom, which userspace doesn't care
@@ -783,11 +771,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
783} 771}
784 772
785static int copy_entries_to_user(unsigned int total_size, 773static int copy_entries_to_user(unsigned int total_size,
786 struct xt_table *table, 774 const struct xt_table *table,
787 void __user *userptr) 775 void __user *userptr)
788{ 776{
789 unsigned int off, num; 777 unsigned int off, num;
790 struct arpt_entry *e; 778 const struct arpt_entry *e;
791 struct xt_counters *counters; 779 struct xt_counters *counters;
792 struct xt_table_info *private = table->private; 780 struct xt_table_info *private = table->private;
793 int ret = 0; 781 int ret = 0;
@@ -807,7 +795,7 @@ static int copy_entries_to_user(unsigned int total_size,
807 /* FIXME: use iterator macros --RR */ 795 /* FIXME: use iterator macros --RR */
808 /* ... then go back and fix counters and names */ 796 /* ... then go back and fix counters and names */
809 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ 797 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
810 struct arpt_entry_target *t; 798 const struct arpt_entry_target *t;
811 799
812 e = (struct arpt_entry *)(loc_cpu_entry + off); 800 e = (struct arpt_entry *)(loc_cpu_entry + off);
813 if (copy_to_user(userptr + off 801 if (copy_to_user(userptr + off
@@ -818,7 +806,7 @@ static int copy_entries_to_user(unsigned int total_size,
818 goto free_counters; 806 goto free_counters;
819 } 807 }
820 808
821 t = arpt_get_target(e); 809 t = arpt_get_target_c(e);
822 if (copy_to_user(userptr + off + e->target_offset 810 if (copy_to_user(userptr + off + e->target_offset
823 + offsetof(struct arpt_entry_target, 811 + offsetof(struct arpt_entry_target,
824 u.user.name), 812 u.user.name),
@@ -835,7 +823,7 @@ static int copy_entries_to_user(unsigned int total_size,
835} 823}
836 824
837#ifdef CONFIG_COMPAT 825#ifdef CONFIG_COMPAT
838static void compat_standard_from_user(void *dst, void *src) 826static void compat_standard_from_user(void *dst, const void *src)
839{ 827{
840 int v = *(compat_int_t *)src; 828 int v = *(compat_int_t *)src;
841 829
@@ -844,7 +832,7 @@ static void compat_standard_from_user(void *dst, void *src)
844 memcpy(dst, &v, sizeof(v)); 832 memcpy(dst, &v, sizeof(v));
845} 833}
846 834
847static int compat_standard_to_user(void __user *dst, void *src) 835static int compat_standard_to_user(void __user *dst, const void *src)
848{ 836{
849 compat_int_t cv = *(int *)src; 837 compat_int_t cv = *(int *)src;
850 838
@@ -853,18 +841,18 @@ static int compat_standard_to_user(void __user *dst, void *src)
853 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; 841 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
854} 842}
855 843
856static int compat_calc_entry(struct arpt_entry *e, 844static int compat_calc_entry(const struct arpt_entry *e,
857 const struct xt_table_info *info, 845 const struct xt_table_info *info,
858 void *base, struct xt_table_info *newinfo) 846 const void *base, struct xt_table_info *newinfo)
859{ 847{
860 struct arpt_entry_target *t; 848 const struct arpt_entry_target *t;
861 unsigned int entry_offset; 849 unsigned int entry_offset;
862 int off, i, ret; 850 int off, i, ret;
863 851
864 off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); 852 off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
865 entry_offset = (void *)e - base; 853 entry_offset = (void *)e - base;
866 854
867 t = arpt_get_target(e); 855 t = arpt_get_target_c(e);
868 off += xt_compat_target_offset(t->u.kernel.target); 856 off += xt_compat_target_offset(t->u.kernel.target);
869 newinfo->size -= off; 857 newinfo->size -= off;
870 ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off); 858 ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
@@ -885,7 +873,9 @@ static int compat_calc_entry(struct arpt_entry *e,
885static int compat_table_info(const struct xt_table_info *info, 873static int compat_table_info(const struct xt_table_info *info,
886 struct xt_table_info *newinfo) 874 struct xt_table_info *newinfo)
887{ 875{
876 struct arpt_entry *iter;
888 void *loc_cpu_entry; 877 void *loc_cpu_entry;
878 int ret;
889 879
890 if (!newinfo || !info) 880 if (!newinfo || !info)
891 return -EINVAL; 881 return -EINVAL;
@@ -894,13 +884,17 @@ static int compat_table_info(const struct xt_table_info *info,
894 memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); 884 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
895 newinfo->initial_entries = 0; 885 newinfo->initial_entries = 0;
896 loc_cpu_entry = info->entries[raw_smp_processor_id()]; 886 loc_cpu_entry = info->entries[raw_smp_processor_id()];
897 return ARPT_ENTRY_ITERATE(loc_cpu_entry, info->size, 887 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
898 compat_calc_entry, info, loc_cpu_entry, 888 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
899 newinfo); 889 if (ret != 0)
890 return ret;
891 }
892 return 0;
900} 893}
901#endif 894#endif
902 895
903static int get_info(struct net *net, void __user *user, int *len, int compat) 896static int get_info(struct net *net, void __user *user,
897 const int *len, int compat)
904{ 898{
905 char name[ARPT_TABLE_MAXNAMELEN]; 899 char name[ARPT_TABLE_MAXNAMELEN];
906 struct xt_table *t; 900 struct xt_table *t;
@@ -959,7 +953,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
959} 953}
960 954
961static int get_entries(struct net *net, struct arpt_get_entries __user *uptr, 955static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
962 int *len) 956 const int *len)
963{ 957{
964 int ret; 958 int ret;
965 struct arpt_get_entries get; 959 struct arpt_get_entries get;
@@ -1010,6 +1004,7 @@ static int __do_replace(struct net *net, const char *name,
1010 struct xt_table_info *oldinfo; 1004 struct xt_table_info *oldinfo;
1011 struct xt_counters *counters; 1005 struct xt_counters *counters;
1012 void *loc_cpu_old_entry; 1006 void *loc_cpu_old_entry;
1007 struct arpt_entry *iter;
1013 1008
1014 ret = 0; 1009 ret = 0;
1015 counters = vmalloc_node(num_counters * sizeof(struct xt_counters), 1010 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
@@ -1053,8 +1048,8 @@ static int __do_replace(struct net *net, const char *name,
1053 1048
1054 /* Decrease module usage counts and free resource */ 1049 /* Decrease module usage counts and free resource */
1055 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; 1050 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1056 ARPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, 1051 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1057 NULL); 1052 cleanup_entry(iter);
1058 1053
1059 xt_free_table_info(oldinfo); 1054 xt_free_table_info(oldinfo);
1060 if (copy_to_user(counters_ptr, counters, 1055 if (copy_to_user(counters_ptr, counters,
@@ -1073,12 +1068,14 @@ static int __do_replace(struct net *net, const char *name,
1073 return ret; 1068 return ret;
1074} 1069}
1075 1070
1076static int do_replace(struct net *net, void __user *user, unsigned int len) 1071static int do_replace(struct net *net, const void __user *user,
1072 unsigned int len)
1077{ 1073{
1078 int ret; 1074 int ret;
1079 struct arpt_replace tmp; 1075 struct arpt_replace tmp;
1080 struct xt_table_info *newinfo; 1076 struct xt_table_info *newinfo;
1081 void *loc_cpu_entry; 1077 void *loc_cpu_entry;
1078 struct arpt_entry *iter;
1082 1079
1083 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1080 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1084 return -EFAULT; 1081 return -EFAULT;
@@ -1099,9 +1096,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
1099 goto free_newinfo; 1096 goto free_newinfo;
1100 } 1097 }
1101 1098
1102 ret = translate_table(tmp.name, tmp.valid_hooks, 1099 ret = translate_table(newinfo, loc_cpu_entry, &tmp);
1103 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1104 tmp.hook_entry, tmp.underflow);
1105 if (ret != 0) 1100 if (ret != 0)
1106 goto free_newinfo; 1101 goto free_newinfo;
1107 1102
@@ -1114,27 +1109,15 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
1114 return 0; 1109 return 0;
1115 1110
1116 free_newinfo_untrans: 1111 free_newinfo_untrans:
1117 ARPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); 1112 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1113 cleanup_entry(iter);
1118 free_newinfo: 1114 free_newinfo:
1119 xt_free_table_info(newinfo); 1115 xt_free_table_info(newinfo);
1120 return ret; 1116 return ret;
1121} 1117}
1122 1118
1123/* We're lazy, and add to the first CPU; overflow works its fey magic 1119static int do_add_counters(struct net *net, const void __user *user,
1124 * and everything is OK. */ 1120 unsigned int len, int compat)
1125static int
1126add_counter_to_entry(struct arpt_entry *e,
1127 const struct xt_counters addme[],
1128 unsigned int *i)
1129{
1130 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1131
1132 (*i)++;
1133 return 0;
1134}
1135
1136static int do_add_counters(struct net *net, void __user *user, unsigned int len,
1137 int compat)
1138{ 1121{
1139 unsigned int i, curcpu; 1122 unsigned int i, curcpu;
1140 struct xt_counters_info tmp; 1123 struct xt_counters_info tmp;
@@ -1147,6 +1130,7 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
1147 const struct xt_table_info *private; 1130 const struct xt_table_info *private;
1148 int ret = 0; 1131 int ret = 0;
1149 void *loc_cpu_entry; 1132 void *loc_cpu_entry;
1133 struct arpt_entry *iter;
1150#ifdef CONFIG_COMPAT 1134#ifdef CONFIG_COMPAT
1151 struct compat_xt_counters_info compat_tmp; 1135 struct compat_xt_counters_info compat_tmp;
1152 1136
@@ -1204,11 +1188,10 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
1204 curcpu = smp_processor_id(); 1188 curcpu = smp_processor_id();
1205 loc_cpu_entry = private->entries[curcpu]; 1189 loc_cpu_entry = private->entries[curcpu];
1206 xt_info_wrlock(curcpu); 1190 xt_info_wrlock(curcpu);
1207 ARPT_ENTRY_ITERATE(loc_cpu_entry, 1191 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1208 private->size, 1192 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1209 add_counter_to_entry, 1193 ++i;
1210 paddc, 1194 }
1211 &i);
1212 xt_info_wrunlock(curcpu); 1195 xt_info_wrunlock(curcpu);
1213 unlock_up_free: 1196 unlock_up_free:
1214 local_bh_enable(); 1197 local_bh_enable();
@@ -1221,28 +1204,22 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
1221} 1204}
1222 1205
1223#ifdef CONFIG_COMPAT 1206#ifdef CONFIG_COMPAT
1224static inline int 1207static inline void compat_release_entry(struct compat_arpt_entry *e)
1225compat_release_entry(struct compat_arpt_entry *e, unsigned int *i)
1226{ 1208{
1227 struct arpt_entry_target *t; 1209 struct arpt_entry_target *t;
1228 1210
1229 if (i && (*i)-- == 0)
1230 return 1;
1231
1232 t = compat_arpt_get_target(e); 1211 t = compat_arpt_get_target(e);
1233 module_put(t->u.kernel.target->me); 1212 module_put(t->u.kernel.target->me);
1234 return 0;
1235} 1213}
1236 1214
1237static inline int 1215static inline int
1238check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, 1216check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
1239 struct xt_table_info *newinfo, 1217 struct xt_table_info *newinfo,
1240 unsigned int *size, 1218 unsigned int *size,
1241 unsigned char *base, 1219 const unsigned char *base,
1242 unsigned char *limit, 1220 const unsigned char *limit,
1243 unsigned int *hook_entries, 1221 const unsigned int *hook_entries,
1244 unsigned int *underflows, 1222 const unsigned int *underflows,
1245 unsigned int *i,
1246 const char *name) 1223 const char *name)
1247{ 1224{
1248 struct arpt_entry_target *t; 1225 struct arpt_entry_target *t;
@@ -1302,8 +1279,6 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
1302 /* Clear counters and comefrom */ 1279 /* Clear counters and comefrom */
1303 memset(&e->counters, 0, sizeof(e->counters)); 1280 memset(&e->counters, 0, sizeof(e->counters));
1304 e->comefrom = 0; 1281 e->comefrom = 0;
1305
1306 (*i)++;
1307 return 0; 1282 return 0;
1308 1283
1309release_target: 1284release_target:
@@ -1347,19 +1322,6 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
1347 return ret; 1322 return ret;
1348} 1323}
1349 1324
1350static inline int compat_check_entry(struct arpt_entry *e, const char *name,
1351 unsigned int *i)
1352{
1353 int ret;
1354
1355 ret = check_target(e, name);
1356 if (ret)
1357 return ret;
1358
1359 (*i)++;
1360 return 0;
1361}
1362
1363static int translate_compat_table(const char *name, 1325static int translate_compat_table(const char *name,
1364 unsigned int valid_hooks, 1326 unsigned int valid_hooks,
1365 struct xt_table_info **pinfo, 1327 struct xt_table_info **pinfo,
@@ -1372,8 +1334,10 @@ static int translate_compat_table(const char *name,
1372 unsigned int i, j; 1334 unsigned int i, j;
1373 struct xt_table_info *newinfo, *info; 1335 struct xt_table_info *newinfo, *info;
1374 void *pos, *entry0, *entry1; 1336 void *pos, *entry0, *entry1;
1337 struct compat_arpt_entry *iter0;
1338 struct arpt_entry *iter1;
1375 unsigned int size; 1339 unsigned int size;
1376 int ret; 1340 int ret = 0;
1377 1341
1378 info = *pinfo; 1342 info = *pinfo;
1379 entry0 = *pentry0; 1343 entry0 = *pentry0;
@@ -1390,13 +1354,14 @@ static int translate_compat_table(const char *name,
1390 j = 0; 1354 j = 0;
1391 xt_compat_lock(NFPROTO_ARP); 1355 xt_compat_lock(NFPROTO_ARP);
1392 /* Walk through entries, checking offsets. */ 1356 /* Walk through entries, checking offsets. */
1393 ret = COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size, 1357 xt_entry_foreach(iter0, entry0, total_size) {
1394 check_compat_entry_size_and_hooks, 1358 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1395 info, &size, entry0, 1359 entry0, entry0 + total_size, hook_entries, underflows,
1396 entry0 + total_size, 1360 name);
1397 hook_entries, underflows, &j, name); 1361 if (ret != 0)
1398 if (ret != 0) 1362 goto out_unlock;
1399 goto out_unlock; 1363 ++j;
1364 }
1400 1365
1401 ret = -EINVAL; 1366 ret = -EINVAL;
1402 if (j != number) { 1367 if (j != number) {
@@ -1435,9 +1400,12 @@ static int translate_compat_table(const char *name,
1435 entry1 = newinfo->entries[raw_smp_processor_id()]; 1400 entry1 = newinfo->entries[raw_smp_processor_id()];
1436 pos = entry1; 1401 pos = entry1;
1437 size = total_size; 1402 size = total_size;
1438 ret = COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size, 1403 xt_entry_foreach(iter0, entry0, total_size) {
1439 compat_copy_entry_from_user, 1404 ret = compat_copy_entry_from_user(iter0, &pos,
1440 &pos, &size, name, newinfo, entry1); 1405 &size, name, newinfo, entry1);
1406 if (ret != 0)
1407 break;
1408 }
1441 xt_compat_flush_offsets(NFPROTO_ARP); 1409 xt_compat_flush_offsets(NFPROTO_ARP);
1442 xt_compat_unlock(NFPROTO_ARP); 1410 xt_compat_unlock(NFPROTO_ARP);
1443 if (ret) 1411 if (ret)
@@ -1448,13 +1416,32 @@ static int translate_compat_table(const char *name,
1448 goto free_newinfo; 1416 goto free_newinfo;
1449 1417
1450 i = 0; 1418 i = 0;
1451 ret = ARPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry, 1419 xt_entry_foreach(iter1, entry1, newinfo->size) {
1452 name, &i); 1420 ret = check_target(iter1, name);
1421 if (ret != 0)
1422 break;
1423 ++i;
1424 }
1453 if (ret) { 1425 if (ret) {
1426 /*
1427 * The first i matches need cleanup_entry (calls ->destroy)
1428 * because they had called ->check already. The other j-i
1429 * entries need only release.
1430 */
1431 int skip = i;
1454 j -= i; 1432 j -= i;
1455 COMPAT_ARPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i, 1433 xt_entry_foreach(iter0, entry0, newinfo->size) {
1456 compat_release_entry, &j); 1434 if (skip-- > 0)
1457 ARPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i); 1435 continue;
1436 if (j-- == 0)
1437 break;
1438 compat_release_entry(iter0);
1439 }
1440 xt_entry_foreach(iter1, entry1, newinfo->size) {
1441 if (i-- == 0)
1442 break;
1443 cleanup_entry(iter1);
1444 }
1458 xt_free_table_info(newinfo); 1445 xt_free_table_info(newinfo);
1459 return ret; 1446 return ret;
1460 } 1447 }
@@ -1472,7 +1459,11 @@ static int translate_compat_table(const char *name,
1472free_newinfo: 1459free_newinfo:
1473 xt_free_table_info(newinfo); 1460 xt_free_table_info(newinfo);
1474out: 1461out:
1475 COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j); 1462 xt_entry_foreach(iter0, entry0, total_size) {
1463 if (j-- == 0)
1464 break;
1465 compat_release_entry(iter0);
1466 }
1476 return ret; 1467 return ret;
1477out_unlock: 1468out_unlock:
1478 xt_compat_flush_offsets(NFPROTO_ARP); 1469 xt_compat_flush_offsets(NFPROTO_ARP);
@@ -1499,6 +1490,7 @@ static int compat_do_replace(struct net *net, void __user *user,
1499 struct compat_arpt_replace tmp; 1490 struct compat_arpt_replace tmp;
1500 struct xt_table_info *newinfo; 1491 struct xt_table_info *newinfo;
1501 void *loc_cpu_entry; 1492 void *loc_cpu_entry;
1493 struct arpt_entry *iter;
1502 1494
1503 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1495 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1504 return -EFAULT; 1496 return -EFAULT;
@@ -1536,7 +1528,8 @@ static int compat_do_replace(struct net *net, void __user *user,
1536 return 0; 1528 return 0;
1537 1529
1538 free_newinfo_untrans: 1530 free_newinfo_untrans:
1539 ARPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); 1531 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1532 cleanup_entry(iter);
1540 free_newinfo: 1533 free_newinfo:
1541 xt_free_table_info(newinfo); 1534 xt_free_table_info(newinfo);
1542 return ret; 1535 return ret;
@@ -1570,7 +1563,7 @@ static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user,
1570static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr, 1563static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
1571 compat_uint_t *size, 1564 compat_uint_t *size,
1572 struct xt_counters *counters, 1565 struct xt_counters *counters,
1573 unsigned int *i) 1566 unsigned int i)
1574{ 1567{
1575 struct arpt_entry_target *t; 1568 struct arpt_entry_target *t;
1576 struct compat_arpt_entry __user *ce; 1569 struct compat_arpt_entry __user *ce;
@@ -1578,14 +1571,12 @@ static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
1578 compat_uint_t origsize; 1571 compat_uint_t origsize;
1579 int ret; 1572 int ret;
1580 1573
1581 ret = -EFAULT;
1582 origsize = *size; 1574 origsize = *size;
1583 ce = (struct compat_arpt_entry __user *)*dstptr; 1575 ce = (struct compat_arpt_entry __user *)*dstptr;
1584 if (copy_to_user(ce, e, sizeof(struct arpt_entry))) 1576 if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 ||
1585 goto out; 1577 copy_to_user(&ce->counters, &counters[i],
1586 1578 sizeof(counters[i])) != 0)
1587 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i]))) 1579 return -EFAULT;
1588 goto out;
1589 1580
1590 *dstptr += sizeof(struct compat_arpt_entry); 1581 *dstptr += sizeof(struct compat_arpt_entry);
1591 *size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); 1582 *size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
@@ -1595,18 +1586,12 @@ static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
1595 t = arpt_get_target(e); 1586 t = arpt_get_target(e);
1596 ret = xt_compat_target_to_user(t, dstptr, size); 1587 ret = xt_compat_target_to_user(t, dstptr, size);
1597 if (ret) 1588 if (ret)
1598 goto out; 1589 return ret;
1599 ret = -EFAULT;
1600 next_offset = e->next_offset - (origsize - *size); 1590 next_offset = e->next_offset - (origsize - *size);
1601 if (put_user(target_offset, &ce->target_offset)) 1591 if (put_user(target_offset, &ce->target_offset) != 0 ||
1602 goto out; 1592 put_user(next_offset, &ce->next_offset) != 0)
1603 if (put_user(next_offset, &ce->next_offset)) 1593 return -EFAULT;
1604 goto out;
1605
1606 (*i)++;
1607 return 0; 1594 return 0;
1608out:
1609 return ret;
1610} 1595}
1611 1596
1612static int compat_copy_entries_to_user(unsigned int total_size, 1597static int compat_copy_entries_to_user(unsigned int total_size,
@@ -1620,6 +1605,7 @@ static int compat_copy_entries_to_user(unsigned int total_size,
1620 int ret = 0; 1605 int ret = 0;
1621 void *loc_cpu_entry; 1606 void *loc_cpu_entry;
1622 unsigned int i = 0; 1607 unsigned int i = 0;
1608 struct arpt_entry *iter;
1623 1609
1624 counters = alloc_counters(table); 1610 counters = alloc_counters(table);
1625 if (IS_ERR(counters)) 1611 if (IS_ERR(counters))
@@ -1629,9 +1615,12 @@ static int compat_copy_entries_to_user(unsigned int total_size,
1629 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1615 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1630 pos = userptr; 1616 pos = userptr;
1631 size = total_size; 1617 size = total_size;
1632 ret = ARPT_ENTRY_ITERATE(loc_cpu_entry, total_size, 1618 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1633 compat_copy_entry_to_user, 1619 ret = compat_copy_entry_to_user(iter, &pos,
1634 &pos, &size, counters, &i); 1620 &size, counters, i++);
1621 if (ret != 0)
1622 break;
1623 }
1635 vfree(counters); 1624 vfree(counters);
1636 return ret; 1625 return ret;
1637} 1626}
@@ -1799,12 +1788,7 @@ struct xt_table *arpt_register_table(struct net *net,
1799 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 1788 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1800 memcpy(loc_cpu_entry, repl->entries, repl->size); 1789 memcpy(loc_cpu_entry, repl->entries, repl->size);
1801 1790
1802 ret = translate_table(table->name, table->valid_hooks, 1791 ret = translate_table(newinfo, loc_cpu_entry, repl);
1803 newinfo, loc_cpu_entry, repl->size,
1804 repl->num_entries,
1805 repl->hook_entry,
1806 repl->underflow);
1807
1808 duprintf("arpt_register_table: translate table gives %d\n", ret); 1792 duprintf("arpt_register_table: translate table gives %d\n", ret);
1809 if (ret != 0) 1793 if (ret != 0)
1810 goto out_free; 1794 goto out_free;
@@ -1827,13 +1811,14 @@ void arpt_unregister_table(struct xt_table *table)
1827 struct xt_table_info *private; 1811 struct xt_table_info *private;
1828 void *loc_cpu_entry; 1812 void *loc_cpu_entry;
1829 struct module *table_owner = table->me; 1813 struct module *table_owner = table->me;
1814 struct arpt_entry *iter;
1830 1815
1831 private = xt_unregister_table(table); 1816 private = xt_unregister_table(table);
1832 1817
1833 /* Decrease module usage counts and free resources */ 1818 /* Decrease module usage counts and free resources */
1834 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1819 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1835 ARPT_ENTRY_ITERATE(loc_cpu_entry, private->size, 1820 xt_entry_foreach(iter, loc_cpu_entry, private->size)
1836 cleanup_entry, NULL); 1821 cleanup_entry(iter);
1837 if (private->number > private->initial_entries) 1822 if (private->number > private->initial_entries)
1838 module_put(table_owner); 1823 module_put(table_owner);
1839 xt_free_table_info(private); 1824 xt_free_table_info(private);
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index 97337601827..bfe26f32b93 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -6,6 +6,7 @@
6 */ 6 */
7 7
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/netfilter/x_tables.h>
9#include <linux/netfilter_arp/arp_tables.h> 10#include <linux/netfilter_arp/arp_tables.h>
10 11
11MODULE_LICENSE("GPL"); 12MODULE_LICENSE("GPL");
@@ -15,93 +16,37 @@ MODULE_DESCRIPTION("arptables filter table");
15#define FILTER_VALID_HOOKS ((1 << NF_ARP_IN) | (1 << NF_ARP_OUT) | \ 16#define FILTER_VALID_HOOKS ((1 << NF_ARP_IN) | (1 << NF_ARP_OUT) | \
16 (1 << NF_ARP_FORWARD)) 17 (1 << NF_ARP_FORWARD))
17 18
18static const struct
19{
20 struct arpt_replace repl;
21 struct arpt_standard entries[3];
22 struct arpt_error term;
23} initial_table __net_initdata = {
24 .repl = {
25 .name = "filter",
26 .valid_hooks = FILTER_VALID_HOOKS,
27 .num_entries = 4,
28 .size = sizeof(struct arpt_standard) * 3 + sizeof(struct arpt_error),
29 .hook_entry = {
30 [NF_ARP_IN] = 0,
31 [NF_ARP_OUT] = sizeof(struct arpt_standard),
32 [NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard),
33 },
34 .underflow = {
35 [NF_ARP_IN] = 0,
36 [NF_ARP_OUT] = sizeof(struct arpt_standard),
37 [NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard),
38 },
39 },
40 .entries = {
41 ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_IN */
42 ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_OUT */
43 ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_FORWARD */
44 },
45 .term = ARPT_ERROR_INIT,
46};
47
48static const struct xt_table packet_filter = { 19static const struct xt_table packet_filter = {
49 .name = "filter", 20 .name = "filter",
50 .valid_hooks = FILTER_VALID_HOOKS, 21 .valid_hooks = FILTER_VALID_HOOKS,
51 .me = THIS_MODULE, 22 .me = THIS_MODULE,
52 .af = NFPROTO_ARP, 23 .af = NFPROTO_ARP,
24 .priority = NF_IP_PRI_FILTER,
53}; 25};
54 26
55/* The work comes in here from netfilter.c */ 27/* The work comes in here from netfilter.c */
56static unsigned int arpt_in_hook(unsigned int hook, 28static unsigned int
57 struct sk_buff *skb, 29arptable_filter_hook(unsigned int hook, struct sk_buff *skb,
58 const struct net_device *in, 30 const struct net_device *in, const struct net_device *out,
59 const struct net_device *out, 31 int (*okfn)(struct sk_buff *))
60 int (*okfn)(struct sk_buff *))
61{ 32{
62 return arpt_do_table(skb, hook, in, out, 33 const struct net *net = dev_net((in != NULL) ? in : out);
63 dev_net(in)->ipv4.arptable_filter);
64}
65 34
66static unsigned int arpt_out_hook(unsigned int hook, 35 return arpt_do_table(skb, hook, in, out, net->ipv4.arptable_filter);
67 struct sk_buff *skb,
68 const struct net_device *in,
69 const struct net_device *out,
70 int (*okfn)(struct sk_buff *))
71{
72 return arpt_do_table(skb, hook, in, out,
73 dev_net(out)->ipv4.arptable_filter);
74} 36}
75 37
76static struct nf_hook_ops arpt_ops[] __read_mostly = { 38static struct nf_hook_ops *arpfilter_ops __read_mostly;
77 {
78 .hook = arpt_in_hook,
79 .owner = THIS_MODULE,
80 .pf = NFPROTO_ARP,
81 .hooknum = NF_ARP_IN,
82 .priority = NF_IP_PRI_FILTER,
83 },
84 {
85 .hook = arpt_out_hook,
86 .owner = THIS_MODULE,
87 .pf = NFPROTO_ARP,
88 .hooknum = NF_ARP_OUT,
89 .priority = NF_IP_PRI_FILTER,
90 },
91 {
92 .hook = arpt_in_hook,
93 .owner = THIS_MODULE,
94 .pf = NFPROTO_ARP,
95 .hooknum = NF_ARP_FORWARD,
96 .priority = NF_IP_PRI_FILTER,
97 },
98};
99 39
100static int __net_init arptable_filter_net_init(struct net *net) 40static int __net_init arptable_filter_net_init(struct net *net)
101{ 41{
102 /* Register table */ 42 struct arpt_replace *repl;
43
44 repl = arpt_alloc_initial_table(&packet_filter);
45 if (repl == NULL)
46 return -ENOMEM;
103 net->ipv4.arptable_filter = 47 net->ipv4.arptable_filter =
104 arpt_register_table(net, &packet_filter, &initial_table.repl); 48 arpt_register_table(net, &packet_filter, repl);
49 kfree(repl);
105 if (IS_ERR(net->ipv4.arptable_filter)) 50 if (IS_ERR(net->ipv4.arptable_filter))
106 return PTR_ERR(net->ipv4.arptable_filter); 51 return PTR_ERR(net->ipv4.arptable_filter);
107 return 0; 52 return 0;
@@ -125,9 +70,11 @@ static int __init arptable_filter_init(void)
125 if (ret < 0) 70 if (ret < 0)
126 return ret; 71 return ret;
127 72
128 ret = nf_register_hooks(arpt_ops, ARRAY_SIZE(arpt_ops)); 73 arpfilter_ops = xt_hook_link(&packet_filter, arptable_filter_hook);
129 if (ret < 0) 74 if (IS_ERR(arpfilter_ops)) {
75 ret = PTR_ERR(arpfilter_ops);
130 goto cleanup_table; 76 goto cleanup_table;
77 }
131 return ret; 78 return ret;
132 79
133cleanup_table: 80cleanup_table:
@@ -137,7 +84,7 @@ cleanup_table:
137 84
138static void __exit arptable_filter_fini(void) 85static void __exit arptable_filter_fini(void)
139{ 86{
140 nf_unregister_hooks(arpt_ops, ARRAY_SIZE(arpt_ops)); 87 xt_hook_unlink(&packet_filter, arpfilter_ops);
141 unregister_pernet_subsys(&arptable_filter_net_ops); 88 unregister_pernet_subsys(&arptable_filter_net_ops);
142} 89}
143 90
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 3ce53cf13d5..c92f4e541cf 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -28,6 +28,7 @@
28#include <linux/netfilter/x_tables.h> 28#include <linux/netfilter/x_tables.h>
29#include <linux/netfilter_ipv4/ip_tables.h> 29#include <linux/netfilter_ipv4/ip_tables.h>
30#include <net/netfilter/nf_log.h> 30#include <net/netfilter/nf_log.h>
31#include "../../netfilter/xt_repldata.h"
31 32
32MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
33MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 34MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -66,6 +67,12 @@ do { \
66#define inline 67#define inline
67#endif 68#endif
68 69
70void *ipt_alloc_initial_table(const struct xt_table *info)
71{
72 return xt_alloc_initial_table(ipt, IPT);
73}
74EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
75
69/* 76/*
70 We keep a set of rules for each CPU, so we can avoid write-locking 77 We keep a set of rules for each CPU, so we can avoid write-locking
71 them in the softirq when updating the counters and therefore 78 them in the softirq when updating the counters and therefore
@@ -169,7 +176,7 @@ ipt_error(struct sk_buff *skb, const struct xt_target_param *par)
169 176
170/* Performance critical - called for every packet */ 177/* Performance critical - called for every packet */
171static inline bool 178static inline bool
172do_match(struct ipt_entry_match *m, const struct sk_buff *skb, 179do_match(const struct ipt_entry_match *m, const struct sk_buff *skb,
173 struct xt_match_param *par) 180 struct xt_match_param *par)
174{ 181{
175 par->match = m->u.kernel.match; 182 par->match = m->u.kernel.match;
@@ -184,7 +191,7 @@ do_match(struct ipt_entry_match *m, const struct sk_buff *skb,
184 191
185/* Performance critical */ 192/* Performance critical */
186static inline struct ipt_entry * 193static inline struct ipt_entry *
187get_entry(void *base, unsigned int offset) 194get_entry(const void *base, unsigned int offset)
188{ 195{
189 return (struct ipt_entry *)(base + offset); 196 return (struct ipt_entry *)(base + offset);
190} 197}
@@ -199,6 +206,13 @@ static inline bool unconditional(const struct ipt_ip *ip)
199#undef FWINV 206#undef FWINV
200} 207}
201 208
209/* for const-correctness */
210static inline const struct ipt_entry_target *
211ipt_get_target_c(const struct ipt_entry *e)
212{
213 return ipt_get_target((struct ipt_entry *)e);
214}
215
202#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 216#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
203 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) 217 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
204static const char *const hooknames[] = { 218static const char *const hooknames[] = {
@@ -233,11 +247,11 @@ static struct nf_loginfo trace_loginfo = {
233 247
234/* Mildly perf critical (only if packet tracing is on) */ 248/* Mildly perf critical (only if packet tracing is on) */
235static inline int 249static inline int
236get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e, 250get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
237 const char *hookname, const char **chainname, 251 const char *hookname, const char **chainname,
238 const char **comment, unsigned int *rulenum) 252 const char **comment, unsigned int *rulenum)
239{ 253{
240 struct ipt_standard_target *t = (void *)ipt_get_target(s); 254 const struct ipt_standard_target *t = (void *)ipt_get_target_c(s);
241 255
242 if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) { 256 if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
243 /* Head of user chain: ERROR target with chainname */ 257 /* Head of user chain: ERROR target with chainname */
@@ -263,17 +277,18 @@ get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
263 return 0; 277 return 0;
264} 278}
265 279
266static void trace_packet(struct sk_buff *skb, 280static void trace_packet(const struct sk_buff *skb,
267 unsigned int hook, 281 unsigned int hook,
268 const struct net_device *in, 282 const struct net_device *in,
269 const struct net_device *out, 283 const struct net_device *out,
270 const char *tablename, 284 const char *tablename,
271 struct xt_table_info *private, 285 const struct xt_table_info *private,
272 struct ipt_entry *e) 286 const struct ipt_entry *e)
273{ 287{
274 void *table_base; 288 const void *table_base;
275 const struct ipt_entry *root; 289 const struct ipt_entry *root;
276 const char *hookname, *chainname, *comment; 290 const char *hookname, *chainname, *comment;
291 const struct ipt_entry *iter;
277 unsigned int rulenum = 0; 292 unsigned int rulenum = 0;
278 293
279 table_base = private->entries[smp_processor_id()]; 294 table_base = private->entries[smp_processor_id()];
@@ -282,10 +297,10 @@ static void trace_packet(struct sk_buff *skb,
282 hookname = chainname = hooknames[hook]; 297 hookname = chainname = hooknames[hook];
283 comment = comments[NF_IP_TRACE_COMMENT_RULE]; 298 comment = comments[NF_IP_TRACE_COMMENT_RULE];
284 299
285 IPT_ENTRY_ITERATE(root, 300 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
286 private->size - private->hook_entry[hook], 301 if (get_chainname_rulenum(iter, e, hookname,
287 get_chainname_rulenum, 302 &chainname, &comment, &rulenum) != 0)
288 e, hookname, &chainname, &comment, &rulenum); 303 break;
289 304
290 nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo, 305 nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
291 "TRACE: %s:%s:%s:%u ", 306 "TRACE: %s:%s:%s:%u ",
@@ -315,9 +330,9 @@ ipt_do_table(struct sk_buff *skb,
315 /* Initializing verdict to NF_DROP keeps gcc happy. */ 330 /* Initializing verdict to NF_DROP keeps gcc happy. */
316 unsigned int verdict = NF_DROP; 331 unsigned int verdict = NF_DROP;
317 const char *indev, *outdev; 332 const char *indev, *outdev;
318 void *table_base; 333 const void *table_base;
319 struct ipt_entry *e, *back; 334 struct ipt_entry *e, *back;
320 struct xt_table_info *private; 335 const struct xt_table_info *private;
321 struct xt_match_param mtpar; 336 struct xt_match_param mtpar;
322 struct xt_target_param tgpar; 337 struct xt_target_param tgpar;
323 338
@@ -350,17 +365,22 @@ ipt_do_table(struct sk_buff *skb,
350 back = get_entry(table_base, private->underflow[hook]); 365 back = get_entry(table_base, private->underflow[hook]);
351 366
352 do { 367 do {
353 struct ipt_entry_target *t; 368 const struct ipt_entry_target *t;
369 const struct xt_entry_match *ematch;
354 370
355 IP_NF_ASSERT(e); 371 IP_NF_ASSERT(e);
356 IP_NF_ASSERT(back); 372 IP_NF_ASSERT(back);
357 if (!ip_packet_match(ip, indev, outdev, 373 if (!ip_packet_match(ip, indev, outdev,
358 &e->ip, mtpar.fragoff) || 374 &e->ip, mtpar.fragoff)) {
359 IPT_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) { 375 no_match:
360 e = ipt_next_entry(e); 376 e = ipt_next_entry(e);
361 continue; 377 continue;
362 } 378 }
363 379
380 xt_ematch_foreach(ematch, e)
381 if (do_match(ematch, skb, &mtpar) != 0)
382 goto no_match;
383
364 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1); 384 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
365 385
366 t = ipt_get_target(e); 386 t = ipt_get_target(e);
@@ -443,7 +463,7 @@ ipt_do_table(struct sk_buff *skb,
443/* Figures out from what hook each rule can be called: returns 0 if 463/* Figures out from what hook each rule can be called: returns 0 if
444 there are loops. Puts hook bitmask in comefrom. */ 464 there are loops. Puts hook bitmask in comefrom. */
445static int 465static int
446mark_source_chains(struct xt_table_info *newinfo, 466mark_source_chains(const struct xt_table_info *newinfo,
447 unsigned int valid_hooks, void *entry0) 467 unsigned int valid_hooks, void *entry0)
448{ 468{
449 unsigned int hook; 469 unsigned int hook;
@@ -461,8 +481,8 @@ mark_source_chains(struct xt_table_info *newinfo,
461 e->counters.pcnt = pos; 481 e->counters.pcnt = pos;
462 482
463 for (;;) { 483 for (;;) {
464 struct ipt_standard_target *t 484 const struct ipt_standard_target *t
465 = (void *)ipt_get_target(e); 485 = (void *)ipt_get_target_c(e);
466 int visited = e->comefrom & (1 << hook); 486 int visited = e->comefrom & (1 << hook);
467 487
468 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { 488 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
@@ -552,27 +572,23 @@ mark_source_chains(struct xt_table_info *newinfo,
552 return 1; 572 return 1;
553} 573}
554 574
555static int 575static void cleanup_match(struct ipt_entry_match *m, struct net *net)
556cleanup_match(struct ipt_entry_match *m, unsigned int *i)
557{ 576{
558 struct xt_mtdtor_param par; 577 struct xt_mtdtor_param par;
559 578
560 if (i && (*i)-- == 0) 579 par.net = net;
561 return 1;
562
563 par.match = m->u.kernel.match; 580 par.match = m->u.kernel.match;
564 par.matchinfo = m->data; 581 par.matchinfo = m->data;
565 par.family = NFPROTO_IPV4; 582 par.family = NFPROTO_IPV4;
566 if (par.match->destroy != NULL) 583 if (par.match->destroy != NULL)
567 par.match->destroy(&par); 584 par.match->destroy(&par);
568 module_put(par.match->me); 585 module_put(par.match->me);
569 return 0;
570} 586}
571 587
572static int 588static int
573check_entry(struct ipt_entry *e, const char *name) 589check_entry(const struct ipt_entry *e, const char *name)
574{ 590{
575 struct ipt_entry_target *t; 591 const struct ipt_entry_target *t;
576 592
577 if (!ip_checkentry(&e->ip)) { 593 if (!ip_checkentry(&e->ip)) {
578 duprintf("ip_tables: ip check failed %p %s.\n", e, name); 594 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
@@ -583,7 +599,7 @@ check_entry(struct ipt_entry *e, const char *name)
583 e->next_offset) 599 e->next_offset)
584 return -EINVAL; 600 return -EINVAL;
585 601
586 t = ipt_get_target(e); 602 t = ipt_get_target_c(e);
587 if (e->target_offset + t->u.target_size > e->next_offset) 603 if (e->target_offset + t->u.target_size > e->next_offset)
588 return -EINVAL; 604 return -EINVAL;
589 605
@@ -591,8 +607,7 @@ check_entry(struct ipt_entry *e, const char *name)
591} 607}
592 608
593static int 609static int
594check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par, 610check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par)
595 unsigned int *i)
596{ 611{
597 const struct ipt_ip *ip = par->entryinfo; 612 const struct ipt_ip *ip = par->entryinfo;
598 int ret; 613 int ret;
@@ -607,13 +622,11 @@ check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
607 par.match->name); 622 par.match->name);
608 return ret; 623 return ret;
609 } 624 }
610 ++*i;
611 return 0; 625 return 0;
612} 626}
613 627
614static int 628static int
615find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par, 629find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par)
616 unsigned int *i)
617{ 630{
618 struct xt_match *match; 631 struct xt_match *match;
619 int ret; 632 int ret;
@@ -627,7 +640,7 @@ find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
627 } 640 }
628 m->u.kernel.match = match; 641 m->u.kernel.match = match;
629 642
630 ret = check_match(m, par, i); 643 ret = check_match(m, par);
631 if (ret) 644 if (ret)
632 goto err; 645 goto err;
633 646
@@ -637,10 +650,11 @@ err:
637 return ret; 650 return ret;
638} 651}
639 652
640static int check_target(struct ipt_entry *e, const char *name) 653static int check_target(struct ipt_entry *e, struct net *net, const char *name)
641{ 654{
642 struct ipt_entry_target *t = ipt_get_target(e); 655 struct ipt_entry_target *t = ipt_get_target(e);
643 struct xt_tgchk_param par = { 656 struct xt_tgchk_param par = {
657 .net = net,
644 .table = name, 658 .table = name,
645 .entryinfo = e, 659 .entryinfo = e,
646 .target = t->u.kernel.target, 660 .target = t->u.kernel.target,
@@ -661,27 +675,32 @@ static int check_target(struct ipt_entry *e, const char *name)
661} 675}
662 676
663static int 677static int
664find_check_entry(struct ipt_entry *e, const char *name, unsigned int size, 678find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
665 unsigned int *i) 679 unsigned int size)
666{ 680{
667 struct ipt_entry_target *t; 681 struct ipt_entry_target *t;
668 struct xt_target *target; 682 struct xt_target *target;
669 int ret; 683 int ret;
670 unsigned int j; 684 unsigned int j;
671 struct xt_mtchk_param mtpar; 685 struct xt_mtchk_param mtpar;
686 struct xt_entry_match *ematch;
672 687
673 ret = check_entry(e, name); 688 ret = check_entry(e, name);
674 if (ret) 689 if (ret)
675 return ret; 690 return ret;
676 691
677 j = 0; 692 j = 0;
693 mtpar.net = net;
678 mtpar.table = name; 694 mtpar.table = name;
679 mtpar.entryinfo = &e->ip; 695 mtpar.entryinfo = &e->ip;
680 mtpar.hook_mask = e->comefrom; 696 mtpar.hook_mask = e->comefrom;
681 mtpar.family = NFPROTO_IPV4; 697 mtpar.family = NFPROTO_IPV4;
682 ret = IPT_MATCH_ITERATE(e, find_check_match, &mtpar, &j); 698 xt_ematch_foreach(ematch, e) {
683 if (ret != 0) 699 ret = find_check_match(ematch, &mtpar);
684 goto cleanup_matches; 700 if (ret != 0)
701 goto cleanup_matches;
702 ++j;
703 }
685 704
686 t = ipt_get_target(e); 705 t = ipt_get_target(e);
687 target = try_then_request_module(xt_find_target(AF_INET, 706 target = try_then_request_module(xt_find_target(AF_INET,
@@ -695,27 +714,29 @@ find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
695 } 714 }
696 t->u.kernel.target = target; 715 t->u.kernel.target = target;
697 716
698 ret = check_target(e, name); 717 ret = check_target(e, net, name);
699 if (ret) 718 if (ret)
700 goto err; 719 goto err;
701
702 (*i)++;
703 return 0; 720 return 0;
704 err: 721 err:
705 module_put(t->u.kernel.target->me); 722 module_put(t->u.kernel.target->me);
706 cleanup_matches: 723 cleanup_matches:
707 IPT_MATCH_ITERATE(e, cleanup_match, &j); 724 xt_ematch_foreach(ematch, e) {
725 if (j-- == 0)
726 break;
727 cleanup_match(ematch, net);
728 }
708 return ret; 729 return ret;
709} 730}
710 731
711static bool check_underflow(struct ipt_entry *e) 732static bool check_underflow(const struct ipt_entry *e)
712{ 733{
713 const struct ipt_entry_target *t; 734 const struct ipt_entry_target *t;
714 unsigned int verdict; 735 unsigned int verdict;
715 736
716 if (!unconditional(&e->ip)) 737 if (!unconditional(&e->ip))
717 return false; 738 return false;
718 t = ipt_get_target(e); 739 t = ipt_get_target_c(e);
719 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) 740 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
720 return false; 741 return false;
721 verdict = ((struct ipt_standard_target *)t)->verdict; 742 verdict = ((struct ipt_standard_target *)t)->verdict;
@@ -726,12 +747,11 @@ static bool check_underflow(struct ipt_entry *e)
726static int 747static int
727check_entry_size_and_hooks(struct ipt_entry *e, 748check_entry_size_and_hooks(struct ipt_entry *e,
728 struct xt_table_info *newinfo, 749 struct xt_table_info *newinfo,
729 unsigned char *base, 750 const unsigned char *base,
730 unsigned char *limit, 751 const unsigned char *limit,
731 const unsigned int *hook_entries, 752 const unsigned int *hook_entries,
732 const unsigned int *underflows, 753 const unsigned int *underflows,
733 unsigned int valid_hooks, 754 unsigned int valid_hooks)
734 unsigned int *i)
735{ 755{
736 unsigned int h; 756 unsigned int h;
737 757
@@ -768,50 +788,42 @@ check_entry_size_and_hooks(struct ipt_entry *e,
768 /* Clear counters and comefrom */ 788 /* Clear counters and comefrom */
769 e->counters = ((struct xt_counters) { 0, 0 }); 789 e->counters = ((struct xt_counters) { 0, 0 });
770 e->comefrom = 0; 790 e->comefrom = 0;
771
772 (*i)++;
773 return 0; 791 return 0;
774} 792}
775 793
776static int 794static void
777cleanup_entry(struct ipt_entry *e, unsigned int *i) 795cleanup_entry(struct ipt_entry *e, struct net *net)
778{ 796{
779 struct xt_tgdtor_param par; 797 struct xt_tgdtor_param par;
780 struct ipt_entry_target *t; 798 struct ipt_entry_target *t;
781 799 struct xt_entry_match *ematch;
782 if (i && (*i)-- == 0)
783 return 1;
784 800
785 /* Cleanup all matches */ 801 /* Cleanup all matches */
786 IPT_MATCH_ITERATE(e, cleanup_match, NULL); 802 xt_ematch_foreach(ematch, e)
803 cleanup_match(ematch, net);
787 t = ipt_get_target(e); 804 t = ipt_get_target(e);
788 805
806 par.net = net;
789 par.target = t->u.kernel.target; 807 par.target = t->u.kernel.target;
790 par.targinfo = t->data; 808 par.targinfo = t->data;
791 par.family = NFPROTO_IPV4; 809 par.family = NFPROTO_IPV4;
792 if (par.target->destroy != NULL) 810 if (par.target->destroy != NULL)
793 par.target->destroy(&par); 811 par.target->destroy(&par);
794 module_put(par.target->me); 812 module_put(par.target->me);
795 return 0;
796} 813}
797 814
798/* Checks and translates the user-supplied table segment (held in 815/* Checks and translates the user-supplied table segment (held in
799 newinfo) */ 816 newinfo) */
800static int 817static int
801translate_table(const char *name, 818translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
802 unsigned int valid_hooks, 819 const struct ipt_replace *repl)
803 struct xt_table_info *newinfo,
804 void *entry0,
805 unsigned int size,
806 unsigned int number,
807 const unsigned int *hook_entries,
808 const unsigned int *underflows)
809{ 820{
821 struct ipt_entry *iter;
810 unsigned int i; 822 unsigned int i;
811 int ret; 823 int ret = 0;
812 824
813 newinfo->size = size; 825 newinfo->size = repl->size;
814 newinfo->number = number; 826 newinfo->number = repl->num_entries;
815 827
816 /* Init all hooks to impossible value. */ 828 /* Init all hooks to impossible value. */
817 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 829 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
@@ -822,49 +834,56 @@ translate_table(const char *name,
822 duprintf("translate_table: size %u\n", newinfo->size); 834 duprintf("translate_table: size %u\n", newinfo->size);
823 i = 0; 835 i = 0;
824 /* Walk through entries, checking offsets. */ 836 /* Walk through entries, checking offsets. */
825 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size, 837 xt_entry_foreach(iter, entry0, newinfo->size) {
826 check_entry_size_and_hooks, 838 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
827 newinfo, 839 entry0 + repl->size, repl->hook_entry, repl->underflow,
828 entry0, 840 repl->valid_hooks);
829 entry0 + size, 841 if (ret != 0)
830 hook_entries, underflows, valid_hooks, &i); 842 return ret;
831 if (ret != 0) 843 ++i;
832 return ret; 844 }
833 845
834 if (i != number) { 846 if (i != repl->num_entries) {
835 duprintf("translate_table: %u not %u entries\n", 847 duprintf("translate_table: %u not %u entries\n",
836 i, number); 848 i, repl->num_entries);
837 return -EINVAL; 849 return -EINVAL;
838 } 850 }
839 851
840 /* Check hooks all assigned */ 852 /* Check hooks all assigned */
841 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 853 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
842 /* Only hooks which are valid */ 854 /* Only hooks which are valid */
843 if (!(valid_hooks & (1 << i))) 855 if (!(repl->valid_hooks & (1 << i)))
844 continue; 856 continue;
845 if (newinfo->hook_entry[i] == 0xFFFFFFFF) { 857 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
846 duprintf("Invalid hook entry %u %u\n", 858 duprintf("Invalid hook entry %u %u\n",
847 i, hook_entries[i]); 859 i, repl->hook_entry[i]);
848 return -EINVAL; 860 return -EINVAL;
849 } 861 }
850 if (newinfo->underflow[i] == 0xFFFFFFFF) { 862 if (newinfo->underflow[i] == 0xFFFFFFFF) {
851 duprintf("Invalid underflow %u %u\n", 863 duprintf("Invalid underflow %u %u\n",
852 i, underflows[i]); 864 i, repl->underflow[i]);
853 return -EINVAL; 865 return -EINVAL;
854 } 866 }
855 } 867 }
856 868
857 if (!mark_source_chains(newinfo, valid_hooks, entry0)) 869 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
858 return -ELOOP; 870 return -ELOOP;
859 871
860 /* Finally, each sanity check must pass */ 872 /* Finally, each sanity check must pass */
861 i = 0; 873 i = 0;
862 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size, 874 xt_entry_foreach(iter, entry0, newinfo->size) {
863 find_check_entry, name, size, &i); 875 ret = find_check_entry(iter, net, repl->name, repl->size);
876 if (ret != 0)
877 break;
878 ++i;
879 }
864 880
865 if (ret != 0) { 881 if (ret != 0) {
866 IPT_ENTRY_ITERATE(entry0, newinfo->size, 882 xt_entry_foreach(iter, entry0, newinfo->size) {
867 cleanup_entry, &i); 883 if (i-- == 0)
884 break;
885 cleanup_entry(iter, net);
886 }
868 return ret; 887 return ret;
869 } 888 }
870 889
@@ -877,33 +896,11 @@ translate_table(const char *name,
877 return ret; 896 return ret;
878} 897}
879 898
880/* Gets counters. */
881static inline int
882add_entry_to_counter(const struct ipt_entry *e,
883 struct xt_counters total[],
884 unsigned int *i)
885{
886 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
887
888 (*i)++;
889 return 0;
890}
891
892static inline int
893set_entry_to_counter(const struct ipt_entry *e,
894 struct ipt_counters total[],
895 unsigned int *i)
896{
897 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
898
899 (*i)++;
900 return 0;
901}
902
903static void 899static void
904get_counters(const struct xt_table_info *t, 900get_counters(const struct xt_table_info *t,
905 struct xt_counters counters[]) 901 struct xt_counters counters[])
906{ 902{
903 struct ipt_entry *iter;
907 unsigned int cpu; 904 unsigned int cpu;
908 unsigned int i; 905 unsigned int i;
909 unsigned int curcpu; 906 unsigned int curcpu;
@@ -919,32 +916,32 @@ get_counters(const struct xt_table_info *t,
919 curcpu = smp_processor_id(); 916 curcpu = smp_processor_id();
920 917
921 i = 0; 918 i = 0;
922 IPT_ENTRY_ITERATE(t->entries[curcpu], 919 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
923 t->size, 920 SET_COUNTER(counters[i], iter->counters.bcnt,
924 set_entry_to_counter, 921 iter->counters.pcnt);
925 counters, 922 ++i;
926 &i); 923 }
927 924
928 for_each_possible_cpu(cpu) { 925 for_each_possible_cpu(cpu) {
929 if (cpu == curcpu) 926 if (cpu == curcpu)
930 continue; 927 continue;
931 i = 0; 928 i = 0;
932 xt_info_wrlock(cpu); 929 xt_info_wrlock(cpu);
933 IPT_ENTRY_ITERATE(t->entries[cpu], 930 xt_entry_foreach(iter, t->entries[cpu], t->size) {
934 t->size, 931 ADD_COUNTER(counters[i], iter->counters.bcnt,
935 add_entry_to_counter, 932 iter->counters.pcnt);
936 counters, 933 ++i; /* macro does multi eval of i */
937 &i); 934 }
938 xt_info_wrunlock(cpu); 935 xt_info_wrunlock(cpu);
939 } 936 }
940 local_bh_enable(); 937 local_bh_enable();
941} 938}
942 939
943static struct xt_counters * alloc_counters(struct xt_table *table) 940static struct xt_counters *alloc_counters(const struct xt_table *table)
944{ 941{
945 unsigned int countersize; 942 unsigned int countersize;
946 struct xt_counters *counters; 943 struct xt_counters *counters;
947 struct xt_table_info *private = table->private; 944 const struct xt_table_info *private = table->private;
948 945
949 /* We need atomic snapshot of counters: rest doesn't change 946 /* We need atomic snapshot of counters: rest doesn't change
950 (other than comefrom, which userspace doesn't care 947 (other than comefrom, which userspace doesn't care
@@ -962,11 +959,11 @@ static struct xt_counters * alloc_counters(struct xt_table *table)
962 959
963static int 960static int
964copy_entries_to_user(unsigned int total_size, 961copy_entries_to_user(unsigned int total_size,
965 struct xt_table *table, 962 const struct xt_table *table,
966 void __user *userptr) 963 void __user *userptr)
967{ 964{
968 unsigned int off, num; 965 unsigned int off, num;
969 struct ipt_entry *e; 966 const struct ipt_entry *e;
970 struct xt_counters *counters; 967 struct xt_counters *counters;
971 const struct xt_table_info *private = table->private; 968 const struct xt_table_info *private = table->private;
972 int ret = 0; 969 int ret = 0;
@@ -1018,7 +1015,7 @@ copy_entries_to_user(unsigned int total_size,
1018 } 1015 }
1019 } 1016 }
1020 1017
1021 t = ipt_get_target(e); 1018 t = ipt_get_target_c(e);
1022 if (copy_to_user(userptr + off + e->target_offset 1019 if (copy_to_user(userptr + off + e->target_offset
1023 + offsetof(struct ipt_entry_target, 1020 + offsetof(struct ipt_entry_target,
1024 u.user.name), 1021 u.user.name),
@@ -1035,7 +1032,7 @@ copy_entries_to_user(unsigned int total_size,
1035} 1032}
1036 1033
1037#ifdef CONFIG_COMPAT 1034#ifdef CONFIG_COMPAT
1038static void compat_standard_from_user(void *dst, void *src) 1035static void compat_standard_from_user(void *dst, const void *src)
1039{ 1036{
1040 int v = *(compat_int_t *)src; 1037 int v = *(compat_int_t *)src;
1041 1038
@@ -1044,7 +1041,7 @@ static void compat_standard_from_user(void *dst, void *src)
1044 memcpy(dst, &v, sizeof(v)); 1041 memcpy(dst, &v, sizeof(v));
1045} 1042}
1046 1043
1047static int compat_standard_to_user(void __user *dst, void *src) 1044static int compat_standard_to_user(void __user *dst, const void *src)
1048{ 1045{
1049 compat_int_t cv = *(int *)src; 1046 compat_int_t cv = *(int *)src;
1050 1047
@@ -1053,25 +1050,20 @@ static int compat_standard_to_user(void __user *dst, void *src)
1053 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; 1050 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1054} 1051}
1055 1052
1056static inline int 1053static int compat_calc_entry(const struct ipt_entry *e,
1057compat_calc_match(struct ipt_entry_match *m, int *size)
1058{
1059 *size += xt_compat_match_offset(m->u.kernel.match);
1060 return 0;
1061}
1062
1063static int compat_calc_entry(struct ipt_entry *e,
1064 const struct xt_table_info *info, 1054 const struct xt_table_info *info,
1065 void *base, struct xt_table_info *newinfo) 1055 const void *base, struct xt_table_info *newinfo)
1066{ 1056{
1067 struct ipt_entry_target *t; 1057 const struct xt_entry_match *ematch;
1058 const struct ipt_entry_target *t;
1068 unsigned int entry_offset; 1059 unsigned int entry_offset;
1069 int off, i, ret; 1060 int off, i, ret;
1070 1061
1071 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); 1062 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1072 entry_offset = (void *)e - base; 1063 entry_offset = (void *)e - base;
1073 IPT_MATCH_ITERATE(e, compat_calc_match, &off); 1064 xt_ematch_foreach(ematch, e)
1074 t = ipt_get_target(e); 1065 off += xt_compat_match_offset(ematch->u.kernel.match);
1066 t = ipt_get_target_c(e);
1075 off += xt_compat_target_offset(t->u.kernel.target); 1067 off += xt_compat_target_offset(t->u.kernel.target);
1076 newinfo->size -= off; 1068 newinfo->size -= off;
1077 ret = xt_compat_add_offset(AF_INET, entry_offset, off); 1069 ret = xt_compat_add_offset(AF_INET, entry_offset, off);
@@ -1092,7 +1084,9 @@ static int compat_calc_entry(struct ipt_entry *e,
1092static int compat_table_info(const struct xt_table_info *info, 1084static int compat_table_info(const struct xt_table_info *info,
1093 struct xt_table_info *newinfo) 1085 struct xt_table_info *newinfo)
1094{ 1086{
1087 struct ipt_entry *iter;
1095 void *loc_cpu_entry; 1088 void *loc_cpu_entry;
1089 int ret;
1096 1090
1097 if (!newinfo || !info) 1091 if (!newinfo || !info)
1098 return -EINVAL; 1092 return -EINVAL;
@@ -1101,13 +1095,17 @@ static int compat_table_info(const struct xt_table_info *info,
1101 memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); 1095 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1102 newinfo->initial_entries = 0; 1096 newinfo->initial_entries = 0;
1103 loc_cpu_entry = info->entries[raw_smp_processor_id()]; 1097 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1104 return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size, 1098 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1105 compat_calc_entry, info, loc_cpu_entry, 1099 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1106 newinfo); 1100 if (ret != 0)
1101 return ret;
1102 }
1103 return 0;
1107} 1104}
1108#endif 1105#endif
1109 1106
1110static int get_info(struct net *net, void __user *user, int *len, int compat) 1107static int get_info(struct net *net, void __user *user,
1108 const int *len, int compat)
1111{ 1109{
1112 char name[IPT_TABLE_MAXNAMELEN]; 1110 char name[IPT_TABLE_MAXNAMELEN];
1113 struct xt_table *t; 1111 struct xt_table *t;
@@ -1167,7 +1165,8 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
1167} 1165}
1168 1166
1169static int 1167static int
1170get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len) 1168get_entries(struct net *net, struct ipt_get_entries __user *uptr,
1169 const int *len)
1171{ 1170{
1172 int ret; 1171 int ret;
1173 struct ipt_get_entries get; 1172 struct ipt_get_entries get;
@@ -1215,6 +1214,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1215 struct xt_table_info *oldinfo; 1214 struct xt_table_info *oldinfo;
1216 struct xt_counters *counters; 1215 struct xt_counters *counters;
1217 void *loc_cpu_old_entry; 1216 void *loc_cpu_old_entry;
1217 struct ipt_entry *iter;
1218 1218
1219 ret = 0; 1219 ret = 0;
1220 counters = vmalloc(num_counters * sizeof(struct xt_counters)); 1220 counters = vmalloc(num_counters * sizeof(struct xt_counters));
@@ -1257,8 +1257,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1257 1257
1258 /* Decrease module usage counts and free resource */ 1258 /* Decrease module usage counts and free resource */
1259 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; 1259 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1260 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, 1260 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1261 NULL); 1261 cleanup_entry(iter, net);
1262
1262 xt_free_table_info(oldinfo); 1263 xt_free_table_info(oldinfo);
1263 if (copy_to_user(counters_ptr, counters, 1264 if (copy_to_user(counters_ptr, counters,
1264 sizeof(struct xt_counters) * num_counters) != 0) 1265 sizeof(struct xt_counters) * num_counters) != 0)
@@ -1277,12 +1278,13 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1277} 1278}
1278 1279
1279static int 1280static int
1280do_replace(struct net *net, void __user *user, unsigned int len) 1281do_replace(struct net *net, const void __user *user, unsigned int len)
1281{ 1282{
1282 int ret; 1283 int ret;
1283 struct ipt_replace tmp; 1284 struct ipt_replace tmp;
1284 struct xt_table_info *newinfo; 1285 struct xt_table_info *newinfo;
1285 void *loc_cpu_entry; 1286 void *loc_cpu_entry;
1287 struct ipt_entry *iter;
1286 1288
1287 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1289 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1288 return -EFAULT; 1290 return -EFAULT;
@@ -1303,9 +1305,7 @@ do_replace(struct net *net, void __user *user, unsigned int len)
1303 goto free_newinfo; 1305 goto free_newinfo;
1304 } 1306 }
1305 1307
1306 ret = translate_table(tmp.name, tmp.valid_hooks, 1308 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1307 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1308 tmp.hook_entry, tmp.underflow);
1309 if (ret != 0) 1309 if (ret != 0)
1310 goto free_newinfo; 1310 goto free_newinfo;
1311 1311
@@ -1318,27 +1318,16 @@ do_replace(struct net *net, void __user *user, unsigned int len)
1318 return 0; 1318 return 0;
1319 1319
1320 free_newinfo_untrans: 1320 free_newinfo_untrans:
1321 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); 1321 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1322 cleanup_entry(iter, net);
1322 free_newinfo: 1323 free_newinfo:
1323 xt_free_table_info(newinfo); 1324 xt_free_table_info(newinfo);
1324 return ret; 1325 return ret;
1325} 1326}
1326 1327
1327/* We're lazy, and add to the first CPU; overflow works its fey magic
1328 * and everything is OK. */
1329static int 1328static int
1330add_counter_to_entry(struct ipt_entry *e, 1329do_add_counters(struct net *net, const void __user *user,
1331 const struct xt_counters addme[], 1330 unsigned int len, int compat)
1332 unsigned int *i)
1333{
1334 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1335
1336 (*i)++;
1337 return 0;
1338}
1339
1340static int
1341do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
1342{ 1331{
1343 unsigned int i, curcpu; 1332 unsigned int i, curcpu;
1344 struct xt_counters_info tmp; 1333 struct xt_counters_info tmp;
@@ -1351,6 +1340,7 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat
1351 const struct xt_table_info *private; 1340 const struct xt_table_info *private;
1352 int ret = 0; 1341 int ret = 0;
1353 void *loc_cpu_entry; 1342 void *loc_cpu_entry;
1343 struct ipt_entry *iter;
1354#ifdef CONFIG_COMPAT 1344#ifdef CONFIG_COMPAT
1355 struct compat_xt_counters_info compat_tmp; 1345 struct compat_xt_counters_info compat_tmp;
1356 1346
@@ -1408,11 +1398,10 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat
1408 curcpu = smp_processor_id(); 1398 curcpu = smp_processor_id();
1409 loc_cpu_entry = private->entries[curcpu]; 1399 loc_cpu_entry = private->entries[curcpu];
1410 xt_info_wrlock(curcpu); 1400 xt_info_wrlock(curcpu);
1411 IPT_ENTRY_ITERATE(loc_cpu_entry, 1401 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1412 private->size, 1402 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1413 add_counter_to_entry, 1403 ++i;
1414 paddc, 1404 }
1415 &i);
1416 xt_info_wrunlock(curcpu); 1405 xt_info_wrunlock(curcpu);
1417 unlock_up_free: 1406 unlock_up_free:
1418 local_bh_enable(); 1407 local_bh_enable();
@@ -1440,45 +1429,40 @@ struct compat_ipt_replace {
1440static int 1429static int
1441compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr, 1430compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
1442 unsigned int *size, struct xt_counters *counters, 1431 unsigned int *size, struct xt_counters *counters,
1443 unsigned int *i) 1432 unsigned int i)
1444{ 1433{
1445 struct ipt_entry_target *t; 1434 struct ipt_entry_target *t;
1446 struct compat_ipt_entry __user *ce; 1435 struct compat_ipt_entry __user *ce;
1447 u_int16_t target_offset, next_offset; 1436 u_int16_t target_offset, next_offset;
1448 compat_uint_t origsize; 1437 compat_uint_t origsize;
1449 int ret; 1438 const struct xt_entry_match *ematch;
1439 int ret = 0;
1450 1440
1451 ret = -EFAULT;
1452 origsize = *size; 1441 origsize = *size;
1453 ce = (struct compat_ipt_entry __user *)*dstptr; 1442 ce = (struct compat_ipt_entry __user *)*dstptr;
1454 if (copy_to_user(ce, e, sizeof(struct ipt_entry))) 1443 if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
1455 goto out; 1444 copy_to_user(&ce->counters, &counters[i],
1456 1445 sizeof(counters[i])) != 0)
1457 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i]))) 1446 return -EFAULT;
1458 goto out;
1459 1447
1460 *dstptr += sizeof(struct compat_ipt_entry); 1448 *dstptr += sizeof(struct compat_ipt_entry);
1461 *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); 1449 *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1462 1450
1463 ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size); 1451 xt_ematch_foreach(ematch, e) {
1452 ret = xt_compat_match_to_user(ematch, dstptr, size);
1453 if (ret != 0)
1454 return ret;
1455 }
1464 target_offset = e->target_offset - (origsize - *size); 1456 target_offset = e->target_offset - (origsize - *size);
1465 if (ret)
1466 goto out;
1467 t = ipt_get_target(e); 1457 t = ipt_get_target(e);
1468 ret = xt_compat_target_to_user(t, dstptr, size); 1458 ret = xt_compat_target_to_user(t, dstptr, size);
1469 if (ret) 1459 if (ret)
1470 goto out; 1460 return ret;
1471 ret = -EFAULT;
1472 next_offset = e->next_offset - (origsize - *size); 1461 next_offset = e->next_offset - (origsize - *size);
1473 if (put_user(target_offset, &ce->target_offset)) 1462 if (put_user(target_offset, &ce->target_offset) != 0 ||
1474 goto out; 1463 put_user(next_offset, &ce->next_offset) != 0)
1475 if (put_user(next_offset, &ce->next_offset)) 1464 return -EFAULT;
1476 goto out;
1477
1478 (*i)++;
1479 return 0; 1465 return 0;
1480out:
1481 return ret;
1482} 1466}
1483 1467
1484static int 1468static int
@@ -1486,7 +1470,7 @@ compat_find_calc_match(struct ipt_entry_match *m,
1486 const char *name, 1470 const char *name,
1487 const struct ipt_ip *ip, 1471 const struct ipt_ip *ip,
1488 unsigned int hookmask, 1472 unsigned int hookmask,
1489 int *size, unsigned int *i) 1473 int *size)
1490{ 1474{
1491 struct xt_match *match; 1475 struct xt_match *match;
1492 1476
@@ -1500,47 +1484,32 @@ compat_find_calc_match(struct ipt_entry_match *m,
1500 } 1484 }
1501 m->u.kernel.match = match; 1485 m->u.kernel.match = match;
1502 *size += xt_compat_match_offset(match); 1486 *size += xt_compat_match_offset(match);
1503
1504 (*i)++;
1505 return 0;
1506}
1507
1508static int
1509compat_release_match(struct ipt_entry_match *m, unsigned int *i)
1510{
1511 if (i && (*i)-- == 0)
1512 return 1;
1513
1514 module_put(m->u.kernel.match->me);
1515 return 0; 1487 return 0;
1516} 1488}
1517 1489
1518static int 1490static void compat_release_entry(struct compat_ipt_entry *e)
1519compat_release_entry(struct compat_ipt_entry *e, unsigned int *i)
1520{ 1491{
1521 struct ipt_entry_target *t; 1492 struct ipt_entry_target *t;
1522 1493 struct xt_entry_match *ematch;
1523 if (i && (*i)-- == 0)
1524 return 1;
1525 1494
1526 /* Cleanup all matches */ 1495 /* Cleanup all matches */
1527 COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, NULL); 1496 xt_ematch_foreach(ematch, e)
1497 module_put(ematch->u.kernel.match->me);
1528 t = compat_ipt_get_target(e); 1498 t = compat_ipt_get_target(e);
1529 module_put(t->u.kernel.target->me); 1499 module_put(t->u.kernel.target->me);
1530 return 0;
1531} 1500}
1532 1501
1533static int 1502static int
1534check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, 1503check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
1535 struct xt_table_info *newinfo, 1504 struct xt_table_info *newinfo,
1536 unsigned int *size, 1505 unsigned int *size,
1537 unsigned char *base, 1506 const unsigned char *base,
1538 unsigned char *limit, 1507 const unsigned char *limit,
1539 unsigned int *hook_entries, 1508 const unsigned int *hook_entries,
1540 unsigned int *underflows, 1509 const unsigned int *underflows,
1541 unsigned int *i,
1542 const char *name) 1510 const char *name)
1543{ 1511{
1512 struct xt_entry_match *ematch;
1544 struct ipt_entry_target *t; 1513 struct ipt_entry_target *t;
1545 struct xt_target *target; 1514 struct xt_target *target;
1546 unsigned int entry_offset; 1515 unsigned int entry_offset;
@@ -1569,10 +1538,13 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
1569 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); 1538 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1570 entry_offset = (void *)e - (void *)base; 1539 entry_offset = (void *)e - (void *)base;
1571 j = 0; 1540 j = 0;
1572 ret = COMPAT_IPT_MATCH_ITERATE(e, compat_find_calc_match, name, 1541 xt_ematch_foreach(ematch, e) {
1573 &e->ip, e->comefrom, &off, &j); 1542 ret = compat_find_calc_match(ematch, name,
1574 if (ret != 0) 1543 &e->ip, e->comefrom, &off);
1575 goto release_matches; 1544 if (ret != 0)
1545 goto release_matches;
1546 ++j;
1547 }
1576 1548
1577 t = compat_ipt_get_target(e); 1549 t = compat_ipt_get_target(e);
1578 target = try_then_request_module(xt_find_target(AF_INET, 1550 target = try_then_request_module(xt_find_target(AF_INET,
@@ -1604,14 +1576,16 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
1604 /* Clear counters and comefrom */ 1576 /* Clear counters and comefrom */
1605 memset(&e->counters, 0, sizeof(e->counters)); 1577 memset(&e->counters, 0, sizeof(e->counters));
1606 e->comefrom = 0; 1578 e->comefrom = 0;
1607
1608 (*i)++;
1609 return 0; 1579 return 0;
1610 1580
1611out: 1581out:
1612 module_put(t->u.kernel.target->me); 1582 module_put(t->u.kernel.target->me);
1613release_matches: 1583release_matches:
1614 IPT_MATCH_ITERATE(e, compat_release_match, &j); 1584 xt_ematch_foreach(ematch, e) {
1585 if (j-- == 0)
1586 break;
1587 module_put(ematch->u.kernel.match->me);
1588 }
1615 return ret; 1589 return ret;
1616} 1590}
1617 1591
@@ -1625,6 +1599,7 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
1625 struct ipt_entry *de; 1599 struct ipt_entry *de;
1626 unsigned int origsize; 1600 unsigned int origsize;
1627 int ret, h; 1601 int ret, h;
1602 struct xt_entry_match *ematch;
1628 1603
1629 ret = 0; 1604 ret = 0;
1630 origsize = *size; 1605 origsize = *size;
@@ -1635,10 +1610,11 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
1635 *dstptr += sizeof(struct ipt_entry); 1610 *dstptr += sizeof(struct ipt_entry);
1636 *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); 1611 *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1637 1612
1638 ret = COMPAT_IPT_MATCH_ITERATE(e, xt_compat_match_from_user, 1613 xt_ematch_foreach(ematch, e) {
1639 dstptr, size); 1614 ret = xt_compat_match_from_user(ematch, dstptr, size);
1640 if (ret) 1615 if (ret != 0)
1641 return ret; 1616 return ret;
1617 }
1642 de->target_offset = e->target_offset - (origsize - *size); 1618 de->target_offset = e->target_offset - (origsize - *size);
1643 t = compat_ipt_get_target(e); 1619 t = compat_ipt_get_target(e);
1644 target = t->u.kernel.target; 1620 target = t->u.kernel.target;
@@ -1655,36 +1631,43 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
1655} 1631}
1656 1632
1657static int 1633static int
1658compat_check_entry(struct ipt_entry *e, const char *name, 1634compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
1659 unsigned int *i)
1660{ 1635{
1636 struct xt_entry_match *ematch;
1661 struct xt_mtchk_param mtpar; 1637 struct xt_mtchk_param mtpar;
1662 unsigned int j; 1638 unsigned int j;
1663 int ret; 1639 int ret = 0;
1664 1640
1665 j = 0; 1641 j = 0;
1642 mtpar.net = net;
1666 mtpar.table = name; 1643 mtpar.table = name;
1667 mtpar.entryinfo = &e->ip; 1644 mtpar.entryinfo = &e->ip;
1668 mtpar.hook_mask = e->comefrom; 1645 mtpar.hook_mask = e->comefrom;
1669 mtpar.family = NFPROTO_IPV4; 1646 mtpar.family = NFPROTO_IPV4;
1670 ret = IPT_MATCH_ITERATE(e, check_match, &mtpar, &j); 1647 xt_ematch_foreach(ematch, e) {
1671 if (ret) 1648 ret = check_match(ematch, &mtpar);
1672 goto cleanup_matches; 1649 if (ret != 0)
1650 goto cleanup_matches;
1651 ++j;
1652 }
1673 1653
1674 ret = check_target(e, name); 1654 ret = check_target(e, net, name);
1675 if (ret) 1655 if (ret)
1676 goto cleanup_matches; 1656 goto cleanup_matches;
1677
1678 (*i)++;
1679 return 0; 1657 return 0;
1680 1658
1681 cleanup_matches: 1659 cleanup_matches:
1682 IPT_MATCH_ITERATE(e, cleanup_match, &j); 1660 xt_ematch_foreach(ematch, e) {
1661 if (j-- == 0)
1662 break;
1663 cleanup_match(ematch, net);
1664 }
1683 return ret; 1665 return ret;
1684} 1666}
1685 1667
1686static int 1668static int
1687translate_compat_table(const char *name, 1669translate_compat_table(struct net *net,
1670 const char *name,
1688 unsigned int valid_hooks, 1671 unsigned int valid_hooks,
1689 struct xt_table_info **pinfo, 1672 struct xt_table_info **pinfo,
1690 void **pentry0, 1673 void **pentry0,
@@ -1696,6 +1679,8 @@ translate_compat_table(const char *name,
1696 unsigned int i, j; 1679 unsigned int i, j;
1697 struct xt_table_info *newinfo, *info; 1680 struct xt_table_info *newinfo, *info;
1698 void *pos, *entry0, *entry1; 1681 void *pos, *entry0, *entry1;
1682 struct compat_ipt_entry *iter0;
1683 struct ipt_entry *iter1;
1699 unsigned int size; 1684 unsigned int size;
1700 int ret; 1685 int ret;
1701 1686
@@ -1714,13 +1699,14 @@ translate_compat_table(const char *name,
1714 j = 0; 1699 j = 0;
1715 xt_compat_lock(AF_INET); 1700 xt_compat_lock(AF_INET);
1716 /* Walk through entries, checking offsets. */ 1701 /* Walk through entries, checking offsets. */
1717 ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, 1702 xt_entry_foreach(iter0, entry0, total_size) {
1718 check_compat_entry_size_and_hooks, 1703 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1719 info, &size, entry0, 1704 entry0, entry0 + total_size, hook_entries, underflows,
1720 entry0 + total_size, 1705 name);
1721 hook_entries, underflows, &j, name); 1706 if (ret != 0)
1722 if (ret != 0) 1707 goto out_unlock;
1723 goto out_unlock; 1708 ++j;
1709 }
1724 1710
1725 ret = -EINVAL; 1711 ret = -EINVAL;
1726 if (j != number) { 1712 if (j != number) {
@@ -1759,9 +1745,12 @@ translate_compat_table(const char *name,
1759 entry1 = newinfo->entries[raw_smp_processor_id()]; 1745 entry1 = newinfo->entries[raw_smp_processor_id()];
1760 pos = entry1; 1746 pos = entry1;
1761 size = total_size; 1747 size = total_size;
1762 ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, 1748 xt_entry_foreach(iter0, entry0, total_size) {
1763 compat_copy_entry_from_user, 1749 ret = compat_copy_entry_from_user(iter0, &pos,
1764 &pos, &size, name, newinfo, entry1); 1750 &size, name, newinfo, entry1);
1751 if (ret != 0)
1752 break;
1753 }
1765 xt_compat_flush_offsets(AF_INET); 1754 xt_compat_flush_offsets(AF_INET);
1766 xt_compat_unlock(AF_INET); 1755 xt_compat_unlock(AF_INET);
1767 if (ret) 1756 if (ret)
@@ -1772,13 +1761,32 @@ translate_compat_table(const char *name,
1772 goto free_newinfo; 1761 goto free_newinfo;
1773 1762
1774 i = 0; 1763 i = 0;
1775 ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry, 1764 xt_entry_foreach(iter1, entry1, newinfo->size) {
1776 name, &i); 1765 ret = compat_check_entry(iter1, net, name);
1766 if (ret != 0)
1767 break;
1768 ++i;
1769 }
1777 if (ret) { 1770 if (ret) {
1771 /*
1772 * The first i matches need cleanup_entry (calls ->destroy)
1773 * because they had called ->check already. The other j-i
1774 * entries need only release.
1775 */
1776 int skip = i;
1778 j -= i; 1777 j -= i;
1779 COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i, 1778 xt_entry_foreach(iter0, entry0, newinfo->size) {
1780 compat_release_entry, &j); 1779 if (skip-- > 0)
1781 IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i); 1780 continue;
1781 if (j-- == 0)
1782 break;
1783 compat_release_entry(iter0);
1784 }
1785 xt_entry_foreach(iter1, entry1, newinfo->size) {
1786 if (i-- == 0)
1787 break;
1788 cleanup_entry(iter1, net);
1789 }
1782 xt_free_table_info(newinfo); 1790 xt_free_table_info(newinfo);
1783 return ret; 1791 return ret;
1784 } 1792 }
@@ -1796,7 +1804,11 @@ translate_compat_table(const char *name,
1796free_newinfo: 1804free_newinfo:
1797 xt_free_table_info(newinfo); 1805 xt_free_table_info(newinfo);
1798out: 1806out:
1799 COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j); 1807 xt_entry_foreach(iter0, entry0, total_size) {
1808 if (j-- == 0)
1809 break;
1810 compat_release_entry(iter0);
1811 }
1800 return ret; 1812 return ret;
1801out_unlock: 1813out_unlock:
1802 xt_compat_flush_offsets(AF_INET); 1814 xt_compat_flush_offsets(AF_INET);
@@ -1811,6 +1823,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
1811 struct compat_ipt_replace tmp; 1823 struct compat_ipt_replace tmp;
1812 struct xt_table_info *newinfo; 1824 struct xt_table_info *newinfo;
1813 void *loc_cpu_entry; 1825 void *loc_cpu_entry;
1826 struct ipt_entry *iter;
1814 1827
1815 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1828 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1816 return -EFAULT; 1829 return -EFAULT;
@@ -1833,7 +1846,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
1833 goto free_newinfo; 1846 goto free_newinfo;
1834 } 1847 }
1835 1848
1836 ret = translate_compat_table(tmp.name, tmp.valid_hooks, 1849 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1837 &newinfo, &loc_cpu_entry, tmp.size, 1850 &newinfo, &loc_cpu_entry, tmp.size,
1838 tmp.num_entries, tmp.hook_entry, 1851 tmp.num_entries, tmp.hook_entry,
1839 tmp.underflow); 1852 tmp.underflow);
@@ -1849,7 +1862,8 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
1849 return 0; 1862 return 0;
1850 1863
1851 free_newinfo_untrans: 1864 free_newinfo_untrans:
1852 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); 1865 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1866 cleanup_entry(iter, net);
1853 free_newinfo: 1867 free_newinfo:
1854 xt_free_table_info(newinfo); 1868 xt_free_table_info(newinfo);
1855 return ret; 1869 return ret;
@@ -1898,6 +1912,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1898 int ret = 0; 1912 int ret = 0;
1899 const void *loc_cpu_entry; 1913 const void *loc_cpu_entry;
1900 unsigned int i = 0; 1914 unsigned int i = 0;
1915 struct ipt_entry *iter;
1901 1916
1902 counters = alloc_counters(table); 1917 counters = alloc_counters(table);
1903 if (IS_ERR(counters)) 1918 if (IS_ERR(counters))
@@ -1910,9 +1925,12 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1910 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1925 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1911 pos = userptr; 1926 pos = userptr;
1912 size = total_size; 1927 size = total_size;
1913 ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size, 1928 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1914 compat_copy_entry_to_user, 1929 ret = compat_copy_entry_to_user(iter, &pos,
1915 &pos, &size, counters, &i); 1930 &size, counters, i++);
1931 if (ret != 0)
1932 break;
1933 }
1916 1934
1917 vfree(counters); 1935 vfree(counters);
1918 return ret; 1936 return ret;
@@ -2086,11 +2104,7 @@ struct xt_table *ipt_register_table(struct net *net,
2086 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 2104 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2087 memcpy(loc_cpu_entry, repl->entries, repl->size); 2105 memcpy(loc_cpu_entry, repl->entries, repl->size);
2088 2106
2089 ret = translate_table(table->name, table->valid_hooks, 2107 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2090 newinfo, loc_cpu_entry, repl->size,
2091 repl->num_entries,
2092 repl->hook_entry,
2093 repl->underflow);
2094 if (ret != 0) 2108 if (ret != 0)
2095 goto out_free; 2109 goto out_free;
2096 2110
@@ -2108,17 +2122,19 @@ out:
2108 return ERR_PTR(ret); 2122 return ERR_PTR(ret);
2109} 2123}
2110 2124
2111void ipt_unregister_table(struct xt_table *table) 2125void ipt_unregister_table(struct net *net, struct xt_table *table)
2112{ 2126{
2113 struct xt_table_info *private; 2127 struct xt_table_info *private;
2114 void *loc_cpu_entry; 2128 void *loc_cpu_entry;
2115 struct module *table_owner = table->me; 2129 struct module *table_owner = table->me;
2130 struct ipt_entry *iter;
2116 2131
2117 private = xt_unregister_table(table); 2132 private = xt_unregister_table(table);
2118 2133
2119 /* Decrease module usage counts and free resources */ 2134 /* Decrease module usage counts and free resources */
2120 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 2135 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2121 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL); 2136 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2137 cleanup_entry(iter, net);
2122 if (private->number > private->initial_entries) 2138 if (private->number > private->initial_entries)
2123 module_put(table_owner); 2139 module_put(table_owner);
2124 xt_free_table_info(private); 2140 xt_free_table_info(private);
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 40ca2d240ab..0886f96c736 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -560,8 +560,7 @@ struct clusterip_seq_position {
560 560
561static void *clusterip_seq_start(struct seq_file *s, loff_t *pos) 561static void *clusterip_seq_start(struct seq_file *s, loff_t *pos)
562{ 562{
563 const struct proc_dir_entry *pde = s->private; 563 struct clusterip_config *c = s->private;
564 struct clusterip_config *c = pde->data;
565 unsigned int weight; 564 unsigned int weight;
566 u_int32_t local_nodes; 565 u_int32_t local_nodes;
567 struct clusterip_seq_position *idx; 566 struct clusterip_seq_position *idx;
@@ -632,10 +631,9 @@ static int clusterip_proc_open(struct inode *inode, struct file *file)
632 631
633 if (!ret) { 632 if (!ret) {
634 struct seq_file *sf = file->private_data; 633 struct seq_file *sf = file->private_data;
635 struct proc_dir_entry *pde = PDE(inode); 634 struct clusterip_config *c = PDE(inode)->data;
636 struct clusterip_config *c = pde->data;
637 635
638 sf->private = pde; 636 sf->private = c;
639 637
640 clusterip_config_get(c); 638 clusterip_config_get(c);
641 } 639 }
@@ -645,8 +643,7 @@ static int clusterip_proc_open(struct inode *inode, struct file *file)
645 643
646static int clusterip_proc_release(struct inode *inode, struct file *file) 644static int clusterip_proc_release(struct inode *inode, struct file *file)
647{ 645{
648 struct proc_dir_entry *pde = PDE(inode); 646 struct clusterip_config *c = PDE(inode)->data;
649 struct clusterip_config *c = pde->data;
650 int ret; 647 int ret;
651 648
652 ret = seq_release(inode, file); 649 ret = seq_release(inode, file);
@@ -660,10 +657,9 @@ static int clusterip_proc_release(struct inode *inode, struct file *file)
660static ssize_t clusterip_proc_write(struct file *file, const char __user *input, 657static ssize_t clusterip_proc_write(struct file *file, const char __user *input,
661 size_t size, loff_t *ofs) 658 size_t size, loff_t *ofs)
662{ 659{
660 struct clusterip_config *c = PDE(file->f_path.dentry->d_inode)->data;
663#define PROC_WRITELEN 10 661#define PROC_WRITELEN 10
664 char buffer[PROC_WRITELEN+1]; 662 char buffer[PROC_WRITELEN+1];
665 const struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
666 struct clusterip_config *c = pde->data;
667 unsigned long nodenum; 663 unsigned long nodenum;
668 664
669 if (copy_from_user(buffer, input, PROC_WRITELEN)) 665 if (copy_from_user(buffer, input, PROC_WRITELEN))
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 399061c3fd7..09a5d3f7cc4 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -338,7 +338,7 @@ struct compat_ipt_ulog_info {
338 char prefix[ULOG_PREFIX_LEN]; 338 char prefix[ULOG_PREFIX_LEN];
339}; 339};
340 340
341static void ulog_tg_compat_from_user(void *dst, void *src) 341static void ulog_tg_compat_from_user(void *dst, const void *src)
342{ 342{
343 const struct compat_ipt_ulog_info *cl = src; 343 const struct compat_ipt_ulog_info *cl = src;
344 struct ipt_ulog_info l = { 344 struct ipt_ulog_info l = {
@@ -351,7 +351,7 @@ static void ulog_tg_compat_from_user(void *dst, void *src)
351 memcpy(dst, &l, sizeof(l)); 351 memcpy(dst, &l, sizeof(l));
352} 352}
353 353
354static int ulog_tg_compat_to_user(void __user *dst, void *src) 354static int ulog_tg_compat_to_user(void __user *dst, const void *src)
355{ 355{
356 const struct ipt_ulog_info *l = src; 356 const struct ipt_ulog_info *l = src;
357 struct compat_ipt_ulog_info cl = { 357 struct compat_ipt_ulog_info cl = {
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index df566cbd68e..c8dc9800d62 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -23,104 +23,32 @@ MODULE_DESCRIPTION("iptables filter table");
23 (1 << NF_INET_FORWARD) | \ 23 (1 << NF_INET_FORWARD) | \
24 (1 << NF_INET_LOCAL_OUT)) 24 (1 << NF_INET_LOCAL_OUT))
25 25
26static struct
27{
28 struct ipt_replace repl;
29 struct ipt_standard entries[3];
30 struct ipt_error term;
31} initial_table __net_initdata = {
32 .repl = {
33 .name = "filter",
34 .valid_hooks = FILTER_VALID_HOOKS,
35 .num_entries = 4,
36 .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
37 .hook_entry = {
38 [NF_INET_LOCAL_IN] = 0,
39 [NF_INET_FORWARD] = sizeof(struct ipt_standard),
40 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
41 },
42 .underflow = {
43 [NF_INET_LOCAL_IN] = 0,
44 [NF_INET_FORWARD] = sizeof(struct ipt_standard),
45 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
46 },
47 },
48 .entries = {
49 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
50 IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
51 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
52 },
53 .term = IPT_ERROR_INIT, /* ERROR */
54};
55
56static const struct xt_table packet_filter = { 26static const struct xt_table packet_filter = {
57 .name = "filter", 27 .name = "filter",
58 .valid_hooks = FILTER_VALID_HOOKS, 28 .valid_hooks = FILTER_VALID_HOOKS,
59 .me = THIS_MODULE, 29 .me = THIS_MODULE,
60 .af = NFPROTO_IPV4, 30 .af = NFPROTO_IPV4,
31 .priority = NF_IP_PRI_FILTER,
61}; 32};
62 33
63/* The work comes in here from netfilter.c. */
64static unsigned int
65ipt_local_in_hook(unsigned int hook,
66 struct sk_buff *skb,
67 const struct net_device *in,
68 const struct net_device *out,
69 int (*okfn)(struct sk_buff *))
70{
71 return ipt_do_table(skb, hook, in, out,
72 dev_net(in)->ipv4.iptable_filter);
73}
74
75static unsigned int 34static unsigned int
76ipt_hook(unsigned int hook, 35iptable_filter_hook(unsigned int hook, struct sk_buff *skb,
77 struct sk_buff *skb, 36 const struct net_device *in, const struct net_device *out,
78 const struct net_device *in, 37 int (*okfn)(struct sk_buff *))
79 const struct net_device *out,
80 int (*okfn)(struct sk_buff *))
81{ 38{
82 return ipt_do_table(skb, hook, in, out, 39 const struct net *net;
83 dev_net(in)->ipv4.iptable_filter);
84}
85 40
86static unsigned int 41 if (hook == NF_INET_LOCAL_OUT &&
87ipt_local_out_hook(unsigned int hook, 42 (skb->len < sizeof(struct iphdr) ||
88 struct sk_buff *skb, 43 ip_hdrlen(skb) < sizeof(struct iphdr)))
89 const struct net_device *in, 44 /* root is playing with raw sockets. */
90 const struct net_device *out,
91 int (*okfn)(struct sk_buff *))
92{
93 /* root is playing with raw sockets. */
94 if (skb->len < sizeof(struct iphdr) ||
95 ip_hdrlen(skb) < sizeof(struct iphdr))
96 return NF_ACCEPT; 45 return NF_ACCEPT;
97 return ipt_do_table(skb, hook, in, out, 46
98 dev_net(out)->ipv4.iptable_filter); 47 net = dev_net((in != NULL) ? in : out);
48 return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_filter);
99} 49}
100 50
101static struct nf_hook_ops ipt_ops[] __read_mostly = { 51static struct nf_hook_ops *filter_ops __read_mostly;
102 {
103 .hook = ipt_local_in_hook,
104 .owner = THIS_MODULE,
105 .pf = NFPROTO_IPV4,
106 .hooknum = NF_INET_LOCAL_IN,
107 .priority = NF_IP_PRI_FILTER,
108 },
109 {
110 .hook = ipt_hook,
111 .owner = THIS_MODULE,
112 .pf = NFPROTO_IPV4,
113 .hooknum = NF_INET_FORWARD,
114 .priority = NF_IP_PRI_FILTER,
115 },
116 {
117 .hook = ipt_local_out_hook,
118 .owner = THIS_MODULE,
119 .pf = NFPROTO_IPV4,
120 .hooknum = NF_INET_LOCAL_OUT,
121 .priority = NF_IP_PRI_FILTER,
122 },
123};
124 52
125/* Default to forward because I got too much mail already. */ 53/* Default to forward because I got too much mail already. */
126static int forward = NF_ACCEPT; 54static int forward = NF_ACCEPT;
@@ -128,9 +56,18 @@ module_param(forward, bool, 0000);
128 56
129static int __net_init iptable_filter_net_init(struct net *net) 57static int __net_init iptable_filter_net_init(struct net *net)
130{ 58{
131 /* Register table */ 59 struct ipt_replace *repl;
60
61 repl = ipt_alloc_initial_table(&packet_filter);
62 if (repl == NULL)
63 return -ENOMEM;
64 /* Entry 1 is the FORWARD hook */
65 ((struct ipt_standard *)repl->entries)[1].target.verdict =
66 -forward - 1;
67
132 net->ipv4.iptable_filter = 68 net->ipv4.iptable_filter =
133 ipt_register_table(net, &packet_filter, &initial_table.repl); 69 ipt_register_table(net, &packet_filter, repl);
70 kfree(repl);
134 if (IS_ERR(net->ipv4.iptable_filter)) 71 if (IS_ERR(net->ipv4.iptable_filter))
135 return PTR_ERR(net->ipv4.iptable_filter); 72 return PTR_ERR(net->ipv4.iptable_filter);
136 return 0; 73 return 0;
@@ -138,7 +75,7 @@ static int __net_init iptable_filter_net_init(struct net *net)
138 75
139static void __net_exit iptable_filter_net_exit(struct net *net) 76static void __net_exit iptable_filter_net_exit(struct net *net)
140{ 77{
141 ipt_unregister_table(net->ipv4.iptable_filter); 78 ipt_unregister_table(net, net->ipv4.iptable_filter);
142} 79}
143 80
144static struct pernet_operations iptable_filter_net_ops = { 81static struct pernet_operations iptable_filter_net_ops = {
@@ -155,17 +92,16 @@ static int __init iptable_filter_init(void)
155 return -EINVAL; 92 return -EINVAL;
156 } 93 }
157 94
158 /* Entry 1 is the FORWARD hook */
159 initial_table.entries[1].target.verdict = -forward - 1;
160
161 ret = register_pernet_subsys(&iptable_filter_net_ops); 95 ret = register_pernet_subsys(&iptable_filter_net_ops);
162 if (ret < 0) 96 if (ret < 0)
163 return ret; 97 return ret;
164 98
165 /* Register hooks */ 99 /* Register hooks */
166 ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 100 filter_ops = xt_hook_link(&packet_filter, iptable_filter_hook);
167 if (ret < 0) 101 if (IS_ERR(filter_ops)) {
102 ret = PTR_ERR(filter_ops);
168 goto cleanup_table; 103 goto cleanup_table;
104 }
169 105
170 return ret; 106 return ret;
171 107
@@ -176,7 +112,7 @@ static int __init iptable_filter_init(void)
176 112
177static void __exit iptable_filter_fini(void) 113static void __exit iptable_filter_fini(void)
178{ 114{
179 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 115 xt_hook_unlink(&packet_filter, filter_ops);
180 unregister_pernet_subsys(&iptable_filter_net_ops); 116 unregister_pernet_subsys(&iptable_filter_net_ops);
181} 117}
182 118
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index fae78c3076c..b9b83464cbf 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -27,101 +27,16 @@ MODULE_DESCRIPTION("iptables mangle table");
27 (1 << NF_INET_LOCAL_OUT) | \ 27 (1 << NF_INET_LOCAL_OUT) | \
28 (1 << NF_INET_POST_ROUTING)) 28 (1 << NF_INET_POST_ROUTING))
29 29
30/* Ouch - five different hooks? Maybe this should be a config option..... -- BC */
31static const struct
32{
33 struct ipt_replace repl;
34 struct ipt_standard entries[5];
35 struct ipt_error term;
36} initial_table __net_initdata = {
37 .repl = {
38 .name = "mangle",
39 .valid_hooks = MANGLE_VALID_HOOKS,
40 .num_entries = 6,
41 .size = sizeof(struct ipt_standard) * 5 + sizeof(struct ipt_error),
42 .hook_entry = {
43 [NF_INET_PRE_ROUTING] = 0,
44 [NF_INET_LOCAL_IN] = sizeof(struct ipt_standard),
45 [NF_INET_FORWARD] = sizeof(struct ipt_standard) * 2,
46 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
47 [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard) * 4,
48 },
49 .underflow = {
50 [NF_INET_PRE_ROUTING] = 0,
51 [NF_INET_LOCAL_IN] = sizeof(struct ipt_standard),
52 [NF_INET_FORWARD] = sizeof(struct ipt_standard) * 2,
53 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
54 [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard) * 4,
55 },
56 },
57 .entries = {
58 IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
59 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
60 IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
61 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
62 IPT_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
63 },
64 .term = IPT_ERROR_INIT, /* ERROR */
65};
66
67static const struct xt_table packet_mangler = { 30static const struct xt_table packet_mangler = {
68 .name = "mangle", 31 .name = "mangle",
69 .valid_hooks = MANGLE_VALID_HOOKS, 32 .valid_hooks = MANGLE_VALID_HOOKS,
70 .me = THIS_MODULE, 33 .me = THIS_MODULE,
71 .af = NFPROTO_IPV4, 34 .af = NFPROTO_IPV4,
35 .priority = NF_IP_PRI_MANGLE,
72}; 36};
73 37
74/* The work comes in here from netfilter.c. */
75static unsigned int
76ipt_pre_routing_hook(unsigned int hook,
77 struct sk_buff *skb,
78 const struct net_device *in,
79 const struct net_device *out,
80 int (*okfn)(struct sk_buff *))
81{
82 return ipt_do_table(skb, hook, in, out,
83 dev_net(in)->ipv4.iptable_mangle);
84}
85
86static unsigned int
87ipt_post_routing_hook(unsigned int hook,
88 struct sk_buff *skb,
89 const struct net_device *in,
90 const struct net_device *out,
91 int (*okfn)(struct sk_buff *))
92{
93 return ipt_do_table(skb, hook, in, out,
94 dev_net(out)->ipv4.iptable_mangle);
95}
96
97static unsigned int
98ipt_local_in_hook(unsigned int hook,
99 struct sk_buff *skb,
100 const struct net_device *in,
101 const struct net_device *out,
102 int (*okfn)(struct sk_buff *))
103{
104 return ipt_do_table(skb, hook, in, out,
105 dev_net(in)->ipv4.iptable_mangle);
106}
107
108static unsigned int
109ipt_forward_hook(unsigned int hook,
110 struct sk_buff *skb,
111 const struct net_device *in,
112 const struct net_device *out,
113 int (*okfn)(struct sk_buff *))
114{
115 return ipt_do_table(skb, hook, in, out,
116 dev_net(in)->ipv4.iptable_mangle);
117}
118
119static unsigned int 38static unsigned int
120ipt_local_hook(unsigned int hook, 39ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
121 struct sk_buff *skb,
122 const struct net_device *in,
123 const struct net_device *out,
124 int (*okfn)(struct sk_buff *))
125{ 40{
126 unsigned int ret; 41 unsigned int ret;
127 const struct iphdr *iph; 42 const struct iphdr *iph;
@@ -141,7 +56,7 @@ ipt_local_hook(unsigned int hook,
141 daddr = iph->daddr; 56 daddr = iph->daddr;
142 tos = iph->tos; 57 tos = iph->tos;
143 58
144 ret = ipt_do_table(skb, hook, in, out, 59 ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
145 dev_net(out)->ipv4.iptable_mangle); 60 dev_net(out)->ipv4.iptable_mangle);
146 /* Reroute for ANY change. */ 61 /* Reroute for ANY change. */
147 if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) { 62 if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) {
@@ -158,49 +73,36 @@ ipt_local_hook(unsigned int hook,
158 return ret; 73 return ret;
159} 74}
160 75
161static struct nf_hook_ops ipt_ops[] __read_mostly = { 76/* The work comes in here from netfilter.c. */
162 { 77static unsigned int
163 .hook = ipt_pre_routing_hook, 78iptable_mangle_hook(unsigned int hook,
164 .owner = THIS_MODULE, 79 struct sk_buff *skb,
165 .pf = NFPROTO_IPV4, 80 const struct net_device *in,
166 .hooknum = NF_INET_PRE_ROUTING, 81 const struct net_device *out,
167 .priority = NF_IP_PRI_MANGLE, 82 int (*okfn)(struct sk_buff *))
168 }, 83{
169 { 84 if (hook == NF_INET_LOCAL_OUT)
170 .hook = ipt_local_in_hook, 85 return ipt_mangle_out(skb, out);
171 .owner = THIS_MODULE, 86 if (hook == NF_INET_POST_ROUTING)
172 .pf = NFPROTO_IPV4, 87 return ipt_do_table(skb, hook, in, out,
173 .hooknum = NF_INET_LOCAL_IN, 88 dev_net(out)->ipv4.iptable_mangle);
174 .priority = NF_IP_PRI_MANGLE, 89 /* PREROUTING/INPUT/FORWARD: */
175 }, 90 return ipt_do_table(skb, hook, in, out,
176 { 91 dev_net(in)->ipv4.iptable_mangle);
177 .hook = ipt_forward_hook, 92}
178 .owner = THIS_MODULE, 93
179 .pf = NFPROTO_IPV4, 94static struct nf_hook_ops *mangle_ops __read_mostly;
180 .hooknum = NF_INET_FORWARD,
181 .priority = NF_IP_PRI_MANGLE,
182 },
183 {
184 .hook = ipt_local_hook,
185 .owner = THIS_MODULE,
186 .pf = NFPROTO_IPV4,
187 .hooknum = NF_INET_LOCAL_OUT,
188 .priority = NF_IP_PRI_MANGLE,
189 },
190 {
191 .hook = ipt_post_routing_hook,
192 .owner = THIS_MODULE,
193 .pf = NFPROTO_IPV4,
194 .hooknum = NF_INET_POST_ROUTING,
195 .priority = NF_IP_PRI_MANGLE,
196 },
197};
198 95
199static int __net_init iptable_mangle_net_init(struct net *net) 96static int __net_init iptable_mangle_net_init(struct net *net)
200{ 97{
201 /* Register table */ 98 struct ipt_replace *repl;
99
100 repl = ipt_alloc_initial_table(&packet_mangler);
101 if (repl == NULL)
102 return -ENOMEM;
202 net->ipv4.iptable_mangle = 103 net->ipv4.iptable_mangle =
203 ipt_register_table(net, &packet_mangler, &initial_table.repl); 104 ipt_register_table(net, &packet_mangler, repl);
105 kfree(repl);
204 if (IS_ERR(net->ipv4.iptable_mangle)) 106 if (IS_ERR(net->ipv4.iptable_mangle))
205 return PTR_ERR(net->ipv4.iptable_mangle); 107 return PTR_ERR(net->ipv4.iptable_mangle);
206 return 0; 108 return 0;
@@ -208,7 +110,7 @@ static int __net_init iptable_mangle_net_init(struct net *net)
208 110
209static void __net_exit iptable_mangle_net_exit(struct net *net) 111static void __net_exit iptable_mangle_net_exit(struct net *net)
210{ 112{
211 ipt_unregister_table(net->ipv4.iptable_mangle); 113 ipt_unregister_table(net, net->ipv4.iptable_mangle);
212} 114}
213 115
214static struct pernet_operations iptable_mangle_net_ops = { 116static struct pernet_operations iptable_mangle_net_ops = {
@@ -225,9 +127,11 @@ static int __init iptable_mangle_init(void)
225 return ret; 127 return ret;
226 128
227 /* Register hooks */ 129 /* Register hooks */
228 ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 130 mangle_ops = xt_hook_link(&packet_mangler, iptable_mangle_hook);
229 if (ret < 0) 131 if (IS_ERR(mangle_ops)) {
132 ret = PTR_ERR(mangle_ops);
230 goto cleanup_table; 133 goto cleanup_table;
134 }
231 135
232 return ret; 136 return ret;
233 137
@@ -238,7 +142,7 @@ static int __init iptable_mangle_init(void)
238 142
239static void __exit iptable_mangle_fini(void) 143static void __exit iptable_mangle_fini(void)
240{ 144{
241 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 145 xt_hook_unlink(&packet_mangler, mangle_ops);
242 unregister_pernet_subsys(&iptable_mangle_net_ops); 146 unregister_pernet_subsys(&iptable_mangle_net_ops);
243} 147}
244 148
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 993edc23be0..06fb9d11953 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -9,90 +9,44 @@
9 9
10#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT)) 10#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT))
11 11
12static const struct
13{
14 struct ipt_replace repl;
15 struct ipt_standard entries[2];
16 struct ipt_error term;
17} initial_table __net_initdata = {
18 .repl = {
19 .name = "raw",
20 .valid_hooks = RAW_VALID_HOOKS,
21 .num_entries = 3,
22 .size = sizeof(struct ipt_standard) * 2 + sizeof(struct ipt_error),
23 .hook_entry = {
24 [NF_INET_PRE_ROUTING] = 0,
25 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard)
26 },
27 .underflow = {
28 [NF_INET_PRE_ROUTING] = 0,
29 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard)
30 },
31 },
32 .entries = {
33 IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
34 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
35 },
36 .term = IPT_ERROR_INIT, /* ERROR */
37};
38
39static const struct xt_table packet_raw = { 12static const struct xt_table packet_raw = {
40 .name = "raw", 13 .name = "raw",
41 .valid_hooks = RAW_VALID_HOOKS, 14 .valid_hooks = RAW_VALID_HOOKS,
42 .me = THIS_MODULE, 15 .me = THIS_MODULE,
43 .af = NFPROTO_IPV4, 16 .af = NFPROTO_IPV4,
17 .priority = NF_IP_PRI_RAW,
44}; 18};
45 19
46/* The work comes in here from netfilter.c. */ 20/* The work comes in here from netfilter.c. */
47static unsigned int 21static unsigned int
48ipt_hook(unsigned int hook, 22iptable_raw_hook(unsigned int hook, struct sk_buff *skb,
49 struct sk_buff *skb, 23 const struct net_device *in, const struct net_device *out,
50 const struct net_device *in, 24 int (*okfn)(struct sk_buff *))
51 const struct net_device *out,
52 int (*okfn)(struct sk_buff *))
53{ 25{
54 return ipt_do_table(skb, hook, in, out, 26 const struct net *net;
55 dev_net(in)->ipv4.iptable_raw);
56}
57 27
58static unsigned int 28 if (hook == NF_INET_LOCAL_OUT &&
59ipt_local_hook(unsigned int hook, 29 (skb->len < sizeof(struct iphdr) ||
60 struct sk_buff *skb, 30 ip_hdrlen(skb) < sizeof(struct iphdr)))
61 const struct net_device *in, 31 /* root is playing with raw sockets. */
62 const struct net_device *out,
63 int (*okfn)(struct sk_buff *))
64{
65 /* root is playing with raw sockets. */
66 if (skb->len < sizeof(struct iphdr) ||
67 ip_hdrlen(skb) < sizeof(struct iphdr))
68 return NF_ACCEPT; 32 return NF_ACCEPT;
69 return ipt_do_table(skb, hook, in, out, 33
70 dev_net(out)->ipv4.iptable_raw); 34 net = dev_net((in != NULL) ? in : out);
35 return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_raw);
71} 36}
72 37
73/* 'raw' is the very first table. */ 38static struct nf_hook_ops *rawtable_ops __read_mostly;
74static struct nf_hook_ops ipt_ops[] __read_mostly = {
75 {
76 .hook = ipt_hook,
77 .pf = NFPROTO_IPV4,
78 .hooknum = NF_INET_PRE_ROUTING,
79 .priority = NF_IP_PRI_RAW,
80 .owner = THIS_MODULE,
81 },
82 {
83 .hook = ipt_local_hook,
84 .pf = NFPROTO_IPV4,
85 .hooknum = NF_INET_LOCAL_OUT,
86 .priority = NF_IP_PRI_RAW,
87 .owner = THIS_MODULE,
88 },
89};
90 39
91static int __net_init iptable_raw_net_init(struct net *net) 40static int __net_init iptable_raw_net_init(struct net *net)
92{ 41{
93 /* Register table */ 42 struct ipt_replace *repl;
43
44 repl = ipt_alloc_initial_table(&packet_raw);
45 if (repl == NULL)
46 return -ENOMEM;
94 net->ipv4.iptable_raw = 47 net->ipv4.iptable_raw =
95 ipt_register_table(net, &packet_raw, &initial_table.repl); 48 ipt_register_table(net, &packet_raw, repl);
49 kfree(repl);
96 if (IS_ERR(net->ipv4.iptable_raw)) 50 if (IS_ERR(net->ipv4.iptable_raw))
97 return PTR_ERR(net->ipv4.iptable_raw); 51 return PTR_ERR(net->ipv4.iptable_raw);
98 return 0; 52 return 0;
@@ -100,7 +54,7 @@ static int __net_init iptable_raw_net_init(struct net *net)
100 54
101static void __net_exit iptable_raw_net_exit(struct net *net) 55static void __net_exit iptable_raw_net_exit(struct net *net)
102{ 56{
103 ipt_unregister_table(net->ipv4.iptable_raw); 57 ipt_unregister_table(net, net->ipv4.iptable_raw);
104} 58}
105 59
106static struct pernet_operations iptable_raw_net_ops = { 60static struct pernet_operations iptable_raw_net_ops = {
@@ -117,9 +71,11 @@ static int __init iptable_raw_init(void)
117 return ret; 71 return ret;
118 72
119 /* Register hooks */ 73 /* Register hooks */
120 ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 74 rawtable_ops = xt_hook_link(&packet_raw, iptable_raw_hook);
121 if (ret < 0) 75 if (IS_ERR(rawtable_ops)) {
76 ret = PTR_ERR(rawtable_ops);
122 goto cleanup_table; 77 goto cleanup_table;
78 }
123 79
124 return ret; 80 return ret;
125 81
@@ -130,7 +86,7 @@ static int __init iptable_raw_init(void)
130 86
131static void __exit iptable_raw_fini(void) 87static void __exit iptable_raw_fini(void)
132{ 88{
133 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 89 xt_hook_unlink(&packet_raw, rawtable_ops);
134 unregister_pernet_subsys(&iptable_raw_net_ops); 90 unregister_pernet_subsys(&iptable_raw_net_ops);
135} 91}
136 92
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index 3bd3d6388da..cce2f64e6f2 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -27,109 +27,44 @@ MODULE_DESCRIPTION("iptables security table, for MAC rules");
27 (1 << NF_INET_FORWARD) | \ 27 (1 << NF_INET_FORWARD) | \
28 (1 << NF_INET_LOCAL_OUT) 28 (1 << NF_INET_LOCAL_OUT)
29 29
30static const struct
31{
32 struct ipt_replace repl;
33 struct ipt_standard entries[3];
34 struct ipt_error term;
35} initial_table __net_initdata = {
36 .repl = {
37 .name = "security",
38 .valid_hooks = SECURITY_VALID_HOOKS,
39 .num_entries = 4,
40 .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
41 .hook_entry = {
42 [NF_INET_LOCAL_IN] = 0,
43 [NF_INET_FORWARD] = sizeof(struct ipt_standard),
44 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
45 },
46 .underflow = {
47 [NF_INET_LOCAL_IN] = 0,
48 [NF_INET_FORWARD] = sizeof(struct ipt_standard),
49 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
50 },
51 },
52 .entries = {
53 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
54 IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
55 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
56 },
57 .term = IPT_ERROR_INIT, /* ERROR */
58};
59
60static const struct xt_table security_table = { 30static const struct xt_table security_table = {
61 .name = "security", 31 .name = "security",
62 .valid_hooks = SECURITY_VALID_HOOKS, 32 .valid_hooks = SECURITY_VALID_HOOKS,
63 .me = THIS_MODULE, 33 .me = THIS_MODULE,
64 .af = NFPROTO_IPV4, 34 .af = NFPROTO_IPV4,
35 .priority = NF_IP_PRI_SECURITY,
65}; 36};
66 37
67static unsigned int 38static unsigned int
68ipt_local_in_hook(unsigned int hook, 39iptable_security_hook(unsigned int hook, struct sk_buff *skb,
69 struct sk_buff *skb, 40 const struct net_device *in,
70 const struct net_device *in, 41 const struct net_device *out,
71 const struct net_device *out, 42 int (*okfn)(struct sk_buff *))
72 int (*okfn)(struct sk_buff *))
73{
74 return ipt_do_table(skb, hook, in, out,
75 dev_net(in)->ipv4.iptable_security);
76}
77
78static unsigned int
79ipt_forward_hook(unsigned int hook,
80 struct sk_buff *skb,
81 const struct net_device *in,
82 const struct net_device *out,
83 int (*okfn)(struct sk_buff *))
84{ 43{
85 return ipt_do_table(skb, hook, in, out, 44 const struct net *net;
86 dev_net(in)->ipv4.iptable_security);
87}
88 45
89static unsigned int 46 if (hook == NF_INET_LOCAL_OUT &&
90ipt_local_out_hook(unsigned int hook, 47 (skb->len < sizeof(struct iphdr) ||
91 struct sk_buff *skb, 48 ip_hdrlen(skb) < sizeof(struct iphdr)))
92 const struct net_device *in, 49 /* Somebody is playing with raw sockets. */
93 const struct net_device *out,
94 int (*okfn)(struct sk_buff *))
95{
96 /* Somebody is playing with raw sockets. */
97 if (skb->len < sizeof(struct iphdr) ||
98 ip_hdrlen(skb) < sizeof(struct iphdr))
99 return NF_ACCEPT; 50 return NF_ACCEPT;
100 return ipt_do_table(skb, hook, in, out, 51
101 dev_net(out)->ipv4.iptable_security); 52 net = dev_net((in != NULL) ? in : out);
53 return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_security);
102} 54}
103 55
104static struct nf_hook_ops ipt_ops[] __read_mostly = { 56static struct nf_hook_ops *sectbl_ops __read_mostly;
105 {
106 .hook = ipt_local_in_hook,
107 .owner = THIS_MODULE,
108 .pf = NFPROTO_IPV4,
109 .hooknum = NF_INET_LOCAL_IN,
110 .priority = NF_IP_PRI_SECURITY,
111 },
112 {
113 .hook = ipt_forward_hook,
114 .owner = THIS_MODULE,
115 .pf = NFPROTO_IPV4,
116 .hooknum = NF_INET_FORWARD,
117 .priority = NF_IP_PRI_SECURITY,
118 },
119 {
120 .hook = ipt_local_out_hook,
121 .owner = THIS_MODULE,
122 .pf = NFPROTO_IPV4,
123 .hooknum = NF_INET_LOCAL_OUT,
124 .priority = NF_IP_PRI_SECURITY,
125 },
126};
127 57
128static int __net_init iptable_security_net_init(struct net *net) 58static int __net_init iptable_security_net_init(struct net *net)
129{ 59{
130 net->ipv4.iptable_security = 60 struct ipt_replace *repl;
131 ipt_register_table(net, &security_table, &initial_table.repl);
132 61
62 repl = ipt_alloc_initial_table(&security_table);
63 if (repl == NULL)
64 return -ENOMEM;
65 net->ipv4.iptable_security =
66 ipt_register_table(net, &security_table, repl);
67 kfree(repl);
133 if (IS_ERR(net->ipv4.iptable_security)) 68 if (IS_ERR(net->ipv4.iptable_security))
134 return PTR_ERR(net->ipv4.iptable_security); 69 return PTR_ERR(net->ipv4.iptable_security);
135 70
@@ -138,7 +73,7 @@ static int __net_init iptable_security_net_init(struct net *net)
138 73
139static void __net_exit iptable_security_net_exit(struct net *net) 74static void __net_exit iptable_security_net_exit(struct net *net)
140{ 75{
141 ipt_unregister_table(net->ipv4.iptable_security); 76 ipt_unregister_table(net, net->ipv4.iptable_security);
142} 77}
143 78
144static struct pernet_operations iptable_security_net_ops = { 79static struct pernet_operations iptable_security_net_ops = {
@@ -154,9 +89,11 @@ static int __init iptable_security_init(void)
154 if (ret < 0) 89 if (ret < 0)
155 return ret; 90 return ret;
156 91
157 ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 92 sectbl_ops = xt_hook_link(&security_table, iptable_security_hook);
158 if (ret < 0) 93 if (IS_ERR(sectbl_ops)) {
94 ret = PTR_ERR(sectbl_ops);
159 goto cleanup_table; 95 goto cleanup_table;
96 }
160 97
161 return ret; 98 return ret;
162 99
@@ -167,7 +104,7 @@ cleanup_table:
167 104
168static void __exit iptable_security_fini(void) 105static void __exit iptable_security_fini(void)
169{ 106{
170 nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops)); 107 xt_hook_unlink(&security_table, sectbl_ops);
171 unregister_pernet_subsys(&iptable_security_net_ops); 108 unregister_pernet_subsys(&iptable_security_net_ops);
172} 109}
173 110
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index d1ea38a7c49..2bb1f87051c 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -22,6 +22,7 @@
22#include <net/netfilter/nf_conntrack_helper.h> 22#include <net/netfilter/nf_conntrack_helper.h>
23#include <net/netfilter/nf_conntrack_l4proto.h> 23#include <net/netfilter/nf_conntrack_l4proto.h>
24#include <net/netfilter/nf_conntrack_l3proto.h> 24#include <net/netfilter/nf_conntrack_l3proto.h>
25#include <net/netfilter/nf_conntrack_zones.h>
25#include <net/netfilter/nf_conntrack_core.h> 26#include <net/netfilter/nf_conntrack_core.h>
26#include <net/netfilter/ipv4/nf_conntrack_ipv4.h> 27#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
27#include <net/netfilter/nf_nat_helper.h> 28#include <net/netfilter/nf_nat_helper.h>
@@ -266,7 +267,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
266 return -EINVAL; 267 return -EINVAL;
267 } 268 }
268 269
269 h = nf_conntrack_find_get(sock_net(sk), &tuple); 270 h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple);
270 if (h) { 271 if (h) {
271 struct sockaddr_in sin; 272 struct sockaddr_in sin;
272 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); 273 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 7afd39b5b78..7404bde9599 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -18,6 +18,7 @@
18#include <net/netfilter/nf_conntrack_tuple.h> 18#include <net/netfilter/nf_conntrack_tuple.h>
19#include <net/netfilter/nf_conntrack_l4proto.h> 19#include <net/netfilter/nf_conntrack_l4proto.h>
20#include <net/netfilter/nf_conntrack_core.h> 20#include <net/netfilter/nf_conntrack_core.h>
21#include <net/netfilter/nf_conntrack_zones.h>
21#include <net/netfilter/nf_log.h> 22#include <net/netfilter/nf_log.h>
22 23
23static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ; 24static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ;
@@ -114,13 +115,14 @@ static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb,
114 115
115/* Returns conntrack if it dealt with ICMP, and filled in skb fields */ 116/* Returns conntrack if it dealt with ICMP, and filled in skb fields */
116static int 117static int
117icmp_error_message(struct net *net, struct sk_buff *skb, 118icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
118 enum ip_conntrack_info *ctinfo, 119 enum ip_conntrack_info *ctinfo,
119 unsigned int hooknum) 120 unsigned int hooknum)
120{ 121{
121 struct nf_conntrack_tuple innertuple, origtuple; 122 struct nf_conntrack_tuple innertuple, origtuple;
122 const struct nf_conntrack_l4proto *innerproto; 123 const struct nf_conntrack_l4proto *innerproto;
123 const struct nf_conntrack_tuple_hash *h; 124 const struct nf_conntrack_tuple_hash *h;
125 u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
124 126
125 NF_CT_ASSERT(skb->nfct == NULL); 127 NF_CT_ASSERT(skb->nfct == NULL);
126 128
@@ -146,7 +148,7 @@ icmp_error_message(struct net *net, struct sk_buff *skb,
146 148
147 *ctinfo = IP_CT_RELATED; 149 *ctinfo = IP_CT_RELATED;
148 150
149 h = nf_conntrack_find_get(net, &innertuple); 151 h = nf_conntrack_find_get(net, zone, &innertuple);
150 if (!h) { 152 if (!h) {
151 pr_debug("icmp_error_message: no match\n"); 153 pr_debug("icmp_error_message: no match\n");
152 return -NF_ACCEPT; 154 return -NF_ACCEPT;
@@ -163,7 +165,8 @@ icmp_error_message(struct net *net, struct sk_buff *skb,
163 165
164/* Small and modified version of icmp_rcv */ 166/* Small and modified version of icmp_rcv */
165static int 167static int
166icmp_error(struct net *net, struct sk_buff *skb, unsigned int dataoff, 168icmp_error(struct net *net, struct nf_conn *tmpl,
169 struct sk_buff *skb, unsigned int dataoff,
167 enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) 170 enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum)
168{ 171{
169 const struct icmphdr *icmph; 172 const struct icmphdr *icmph;
@@ -208,7 +211,7 @@ icmp_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
208 icmph->type != ICMP_REDIRECT) 211 icmph->type != ICMP_REDIRECT)
209 return NF_ACCEPT; 212 return NF_ACCEPT;
210 213
211 return icmp_error_message(net, skb, ctinfo, hooknum); 214 return icmp_error_message(net, tmpl, skb, ctinfo, hooknum);
212} 215}
213 216
214#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 217#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 331ead3ebd1..cb763ae9ed9 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -17,6 +17,10 @@
17#include <linux/netfilter_bridge.h> 17#include <linux/netfilter_bridge.h>
18#include <linux/netfilter_ipv4.h> 18#include <linux/netfilter_ipv4.h>
19#include <net/netfilter/ipv4/nf_defrag_ipv4.h> 19#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
20#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
21#include <net/netfilter/nf_conntrack.h>
22#endif
23#include <net/netfilter/nf_conntrack_zones.h>
20 24
21/* Returns new sk_buff, or NULL */ 25/* Returns new sk_buff, or NULL */
22static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) 26static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
@@ -38,15 +42,22 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
38static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum, 42static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
39 struct sk_buff *skb) 43 struct sk_buff *skb)
40{ 44{
45 u16 zone = NF_CT_DEFAULT_ZONE;
46
47#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
48 if (skb->nfct)
49 zone = nf_ct_zone((struct nf_conn *)skb->nfct);
50#endif
51
41#ifdef CONFIG_BRIDGE_NETFILTER 52#ifdef CONFIG_BRIDGE_NETFILTER
42 if (skb->nf_bridge && 53 if (skb->nf_bridge &&
43 skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) 54 skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
44 return IP_DEFRAG_CONNTRACK_BRIDGE_IN; 55 return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
45#endif 56#endif
46 if (hooknum == NF_INET_PRE_ROUTING) 57 if (hooknum == NF_INET_PRE_ROUTING)
47 return IP_DEFRAG_CONNTRACK_IN; 58 return IP_DEFRAG_CONNTRACK_IN + zone;
48 else 59 else
49 return IP_DEFRAG_CONNTRACK_OUT; 60 return IP_DEFRAG_CONNTRACK_OUT + zone;
50} 61}
51 62
52static unsigned int ipv4_conntrack_defrag(unsigned int hooknum, 63static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
@@ -59,7 +70,7 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
59#if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE) 70#if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE)
60 /* Previously seen (loopback)? Ignore. Do this before 71 /* Previously seen (loopback)? Ignore. Do this before
61 fragment check. */ 72 fragment check. */
62 if (skb->nfct) 73 if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
63 return NF_ACCEPT; 74 return NF_ACCEPT;
64#endif 75#endif
65#endif 76#endif
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 26066a2327a..4595281c286 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -30,6 +30,7 @@
30#include <net/netfilter/nf_conntrack_helper.h> 30#include <net/netfilter/nf_conntrack_helper.h>
31#include <net/netfilter/nf_conntrack_l3proto.h> 31#include <net/netfilter/nf_conntrack_l3proto.h>
32#include <net/netfilter/nf_conntrack_l4proto.h> 32#include <net/netfilter/nf_conntrack_l4proto.h>
33#include <net/netfilter/nf_conntrack_zones.h>
33 34
34static DEFINE_SPINLOCK(nf_nat_lock); 35static DEFINE_SPINLOCK(nf_nat_lock);
35 36
@@ -69,13 +70,14 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put);
69 70
70/* We keep an extra hash for each conntrack, for fast searching. */ 71/* We keep an extra hash for each conntrack, for fast searching. */
71static inline unsigned int 72static inline unsigned int
72hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple) 73hash_by_src(const struct net *net, u16 zone,
74 const struct nf_conntrack_tuple *tuple)
73{ 75{
74 unsigned int hash; 76 unsigned int hash;
75 77
76 /* Original src, to ensure we map it consistently if poss. */ 78 /* Original src, to ensure we map it consistently if poss. */
77 hash = jhash_3words((__force u32)tuple->src.u3.ip, 79 hash = jhash_3words((__force u32)tuple->src.u3.ip,
78 (__force u32)tuple->src.u.all, 80 (__force u32)tuple->src.u.all ^ zone,
79 tuple->dst.protonum, 0); 81 tuple->dst.protonum, 0);
80 return ((u64)hash * net->ipv4.nat_htable_size) >> 32; 82 return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
81} 83}
@@ -139,12 +141,12 @@ same_src(const struct nf_conn *ct,
139 141
140/* Only called for SRC manip */ 142/* Only called for SRC manip */
141static int 143static int
142find_appropriate_src(struct net *net, 144find_appropriate_src(struct net *net, u16 zone,
143 const struct nf_conntrack_tuple *tuple, 145 const struct nf_conntrack_tuple *tuple,
144 struct nf_conntrack_tuple *result, 146 struct nf_conntrack_tuple *result,
145 const struct nf_nat_range *range) 147 const struct nf_nat_range *range)
146{ 148{
147 unsigned int h = hash_by_src(net, tuple); 149 unsigned int h = hash_by_src(net, zone, tuple);
148 const struct nf_conn_nat *nat; 150 const struct nf_conn_nat *nat;
149 const struct nf_conn *ct; 151 const struct nf_conn *ct;
150 const struct hlist_node *n; 152 const struct hlist_node *n;
@@ -152,7 +154,7 @@ find_appropriate_src(struct net *net,
152 rcu_read_lock(); 154 rcu_read_lock();
153 hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) { 155 hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) {
154 ct = nat->ct; 156 ct = nat->ct;
155 if (same_src(ct, tuple)) { 157 if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
156 /* Copy source part from reply tuple. */ 158 /* Copy source part from reply tuple. */
157 nf_ct_invert_tuplepr(result, 159 nf_ct_invert_tuplepr(result,
158 &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 160 &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
@@ -175,7 +177,7 @@ find_appropriate_src(struct net *net,
175 the ip with the lowest src-ip/dst-ip/proto usage. 177 the ip with the lowest src-ip/dst-ip/proto usage.
176*/ 178*/
177static void 179static void
178find_best_ips_proto(struct nf_conntrack_tuple *tuple, 180find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
179 const struct nf_nat_range *range, 181 const struct nf_nat_range *range,
180 const struct nf_conn *ct, 182 const struct nf_conn *ct,
181 enum nf_nat_manip_type maniptype) 183 enum nf_nat_manip_type maniptype)
@@ -209,7 +211,7 @@ find_best_ips_proto(struct nf_conntrack_tuple *tuple,
209 maxip = ntohl(range->max_ip); 211 maxip = ntohl(range->max_ip);
210 j = jhash_2words((__force u32)tuple->src.u3.ip, 212 j = jhash_2words((__force u32)tuple->src.u3.ip,
211 range->flags & IP_NAT_RANGE_PERSISTENT ? 213 range->flags & IP_NAT_RANGE_PERSISTENT ?
212 0 : (__force u32)tuple->dst.u3.ip, 0); 214 0 : (__force u32)tuple->dst.u3.ip ^ zone, 0);
213 j = ((u64)j * (maxip - minip + 1)) >> 32; 215 j = ((u64)j * (maxip - minip + 1)) >> 32;
214 *var_ipp = htonl(minip + j); 216 *var_ipp = htonl(minip + j);
215} 217}
@@ -229,6 +231,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
229{ 231{
230 struct net *net = nf_ct_net(ct); 232 struct net *net = nf_ct_net(ct);
231 const struct nf_nat_protocol *proto; 233 const struct nf_nat_protocol *proto;
234 u16 zone = nf_ct_zone(ct);
232 235
233 /* 1) If this srcip/proto/src-proto-part is currently mapped, 236 /* 1) If this srcip/proto/src-proto-part is currently mapped,
234 and that same mapping gives a unique tuple within the given 237 and that same mapping gives a unique tuple within the given
@@ -239,7 +242,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
239 manips not an issue. */ 242 manips not an issue. */
240 if (maniptype == IP_NAT_MANIP_SRC && 243 if (maniptype == IP_NAT_MANIP_SRC &&
241 !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) { 244 !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
242 if (find_appropriate_src(net, orig_tuple, tuple, range)) { 245 if (find_appropriate_src(net, zone, orig_tuple, tuple, range)) {
243 pr_debug("get_unique_tuple: Found current src map\n"); 246 pr_debug("get_unique_tuple: Found current src map\n");
244 if (!nf_nat_used_tuple(tuple, ct)) 247 if (!nf_nat_used_tuple(tuple, ct))
245 return; 248 return;
@@ -249,7 +252,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
249 /* 2) Select the least-used IP/proto combination in the given 252 /* 2) Select the least-used IP/proto combination in the given
250 range. */ 253 range. */
251 *tuple = *orig_tuple; 254 *tuple = *orig_tuple;
252 find_best_ips_proto(tuple, range, ct, maniptype); 255 find_best_ips_proto(zone, tuple, range, ct, maniptype);
253 256
254 /* 3) The per-protocol part of the manip is made to map into 257 /* 3) The per-protocol part of the manip is made to map into
255 the range to make a unique tuple. */ 258 the range to make a unique tuple. */
@@ -327,7 +330,8 @@ nf_nat_setup_info(struct nf_conn *ct,
327 if (have_to_hash) { 330 if (have_to_hash) {
328 unsigned int srchash; 331 unsigned int srchash;
329 332
330 srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 333 srchash = hash_by_src(net, nf_ct_zone(ct),
334 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
331 spin_lock_bh(&nf_nat_lock); 335 spin_lock_bh(&nf_nat_lock);
332 /* nf_conntrack_alter_reply might re-allocate exntension aera */ 336 /* nf_conntrack_alter_reply might re-allocate exntension aera */
333 nat = nfct_nat(ct); 337 nat = nfct_nat(ct);
diff --git a/net/ipv4/netfilter/nf_nat_ftp.c b/net/ipv4/netfilter/nf_nat_ftp.c
index a1d5d58a58b..86e0e84ff0a 100644
--- a/net/ipv4/netfilter/nf_nat_ftp.c
+++ b/net/ipv4/netfilter/nf_nat_ftp.c
@@ -27,76 +27,29 @@ MODULE_ALIAS("ip_nat_ftp");
27 27
28/* FIXME: Time out? --RR */ 28/* FIXME: Time out? --RR */
29 29
30static int 30static int nf_nat_ftp_fmt_cmd(enum nf_ct_ftp_type type,
31mangle_rfc959_packet(struct sk_buff *skb, 31 char *buffer, size_t buflen,
32 __be32 newip, 32 __be32 addr, u16 port)
33 u_int16_t port,
34 unsigned int matchoff,
35 unsigned int matchlen,
36 struct nf_conn *ct,
37 enum ip_conntrack_info ctinfo)
38{ 33{
39 char buffer[sizeof("nnn,nnn,nnn,nnn,nnn,nnn")]; 34 switch (type) {
40 35 case NF_CT_FTP_PORT:
41 sprintf(buffer, "%u,%u,%u,%u,%u,%u", 36 case NF_CT_FTP_PASV:
42 NIPQUAD(newip), port>>8, port&0xFF); 37 return snprintf(buffer, buflen, "%u,%u,%u,%u,%u,%u",
43 38 ((unsigned char *)&addr)[0],
44 pr_debug("calling nf_nat_mangle_tcp_packet\n"); 39 ((unsigned char *)&addr)[1],
45 40 ((unsigned char *)&addr)[2],
46 return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff, 41 ((unsigned char *)&addr)[3],
47 matchlen, buffer, strlen(buffer)); 42 port >> 8,
48} 43 port & 0xFF);
49 44 case NF_CT_FTP_EPRT:
50/* |1|132.235.1.2|6275| */ 45 return snprintf(buffer, buflen, "|1|%pI4|%u|", &addr, port);
51static int 46 case NF_CT_FTP_EPSV:
52mangle_eprt_packet(struct sk_buff *skb, 47 return snprintf(buffer, buflen, "|||%u|", port);
53 __be32 newip, 48 }
54 u_int16_t port,
55 unsigned int matchoff,
56 unsigned int matchlen,
57 struct nf_conn *ct,
58 enum ip_conntrack_info ctinfo)
59{
60 char buffer[sizeof("|1|255.255.255.255|65535|")];
61
62 sprintf(buffer, "|1|%u.%u.%u.%u|%u|", NIPQUAD(newip), port);
63
64 pr_debug("calling nf_nat_mangle_tcp_packet\n");
65
66 return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff,
67 matchlen, buffer, strlen(buffer));
68}
69
70/* |1|132.235.1.2|6275| */
71static int
72mangle_epsv_packet(struct sk_buff *skb,
73 __be32 newip,
74 u_int16_t port,
75 unsigned int matchoff,
76 unsigned int matchlen,
77 struct nf_conn *ct,
78 enum ip_conntrack_info ctinfo)
79{
80 char buffer[sizeof("|||65535|")];
81
82 sprintf(buffer, "|||%u|", port);
83
84 pr_debug("calling nf_nat_mangle_tcp_packet\n");
85 49
86 return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff, 50 return 0;
87 matchlen, buffer, strlen(buffer));
88} 51}
89 52
90static int (*mangle[])(struct sk_buff *, __be32, u_int16_t,
91 unsigned int, unsigned int, struct nf_conn *,
92 enum ip_conntrack_info)
93= {
94 [NF_CT_FTP_PORT] = mangle_rfc959_packet,
95 [NF_CT_FTP_PASV] = mangle_rfc959_packet,
96 [NF_CT_FTP_EPRT] = mangle_eprt_packet,
97 [NF_CT_FTP_EPSV] = mangle_epsv_packet
98};
99
100/* So, this packet has hit the connection tracking matching code. 53/* So, this packet has hit the connection tracking matching code.
101 Mangle it, and change the expectation to match the new version. */ 54 Mangle it, and change the expectation to match the new version. */
102static unsigned int nf_nat_ftp(struct sk_buff *skb, 55static unsigned int nf_nat_ftp(struct sk_buff *skb,
@@ -110,6 +63,8 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb,
110 u_int16_t port; 63 u_int16_t port;
111 int dir = CTINFO2DIR(ctinfo); 64 int dir = CTINFO2DIR(ctinfo);
112 struct nf_conn *ct = exp->master; 65 struct nf_conn *ct = exp->master;
66 char buffer[sizeof("|1|255.255.255.255|65535|")];
67 unsigned int buflen;
113 68
114 pr_debug("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen); 69 pr_debug("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen);
115 70
@@ -132,11 +87,21 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb,
132 if (port == 0) 87 if (port == 0)
133 return NF_DROP; 88 return NF_DROP;
134 89
135 if (!mangle[type](skb, newip, port, matchoff, matchlen, ct, ctinfo)) { 90 buflen = nf_nat_ftp_fmt_cmd(type, buffer, sizeof(buffer), newip, port);
136 nf_ct_unexpect_related(exp); 91 if (!buflen)
137 return NF_DROP; 92 goto out;
138 } 93
94 pr_debug("calling nf_nat_mangle_tcp_packet\n");
95
96 if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff,
97 matchlen, buffer, buflen))
98 goto out;
99
139 return NF_ACCEPT; 100 return NF_ACCEPT;
101
102out:
103 nf_ct_unexpect_related(exp);
104 return NF_DROP;
140} 105}
141 106
142static void __exit nf_nat_ftp_fini(void) 107static void __exit nf_nat_ftp_fini(void)
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 7f10a6be019..4b6af4bb1f5 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -141,6 +141,17 @@ static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
141 return 1; 141 return 1;
142} 142}
143 143
144void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
145 __be32 seq, s16 off)
146{
147 if (!off)
148 return;
149 set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
150 adjust_tcp_sequence(ntohl(seq), off, ct, ctinfo);
151 nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
152}
153EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
154
144/* Generic function for mangling variable-length address changes inside 155/* Generic function for mangling variable-length address changes inside
145 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX 156 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
146 * command in FTP). 157 * command in FTP).
@@ -149,14 +160,13 @@ static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
149 * skb enlargement, ... 160 * skb enlargement, ...
150 * 161 *
151 * */ 162 * */
152int 163int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
153nf_nat_mangle_tcp_packet(struct sk_buff *skb, 164 struct nf_conn *ct,
154 struct nf_conn *ct, 165 enum ip_conntrack_info ctinfo,
155 enum ip_conntrack_info ctinfo, 166 unsigned int match_offset,
156 unsigned int match_offset, 167 unsigned int match_len,
157 unsigned int match_len, 168 const char *rep_buffer,
158 const char *rep_buffer, 169 unsigned int rep_len, bool adjust)
159 unsigned int rep_len)
160{ 170{
161 struct rtable *rt = skb_rtable(skb); 171 struct rtable *rt = skb_rtable(skb);
162 struct iphdr *iph; 172 struct iphdr *iph;
@@ -202,16 +212,13 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb,
202 inet_proto_csum_replace2(&tcph->check, skb, 212 inet_proto_csum_replace2(&tcph->check, skb,
203 htons(oldlen), htons(datalen), 1); 213 htons(oldlen), htons(datalen), 1);
204 214
205 if (rep_len != match_len) { 215 if (adjust && rep_len != match_len)
206 set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); 216 nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq,
207 adjust_tcp_sequence(ntohl(tcph->seq), 217 (int)rep_len - (int)match_len);
208 (int)rep_len - (int)match_len, 218
209 ct, ctinfo);
210 nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
211 }
212 return 1; 219 return 1;
213} 220}
214EXPORT_SYMBOL(nf_nat_mangle_tcp_packet); 221EXPORT_SYMBOL(__nf_nat_mangle_tcp_packet);
215 222
216/* Generic function for mangling variable-length address changes inside 223/* Generic function for mangling variable-length address changes inside
217 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX 224 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 9eb171056c6..4c060038d29 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -25,6 +25,7 @@
25#include <net/netfilter/nf_nat_rule.h> 25#include <net/netfilter/nf_nat_rule.h>
26#include <net/netfilter/nf_conntrack_helper.h> 26#include <net/netfilter/nf_conntrack_helper.h>
27#include <net/netfilter/nf_conntrack_expect.h> 27#include <net/netfilter/nf_conntrack_expect.h>
28#include <net/netfilter/nf_conntrack_zones.h>
28#include <linux/netfilter/nf_conntrack_proto_gre.h> 29#include <linux/netfilter/nf_conntrack_proto_gre.h>
29#include <linux/netfilter/nf_conntrack_pptp.h> 30#include <linux/netfilter/nf_conntrack_pptp.h>
30 31
@@ -74,7 +75,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
74 75
75 pr_debug("trying to unexpect other dir: "); 76 pr_debug("trying to unexpect other dir: ");
76 nf_ct_dump_tuple_ip(&t); 77 nf_ct_dump_tuple_ip(&t);
77 other_exp = nf_ct_expect_find_get(net, &t); 78 other_exp = nf_ct_expect_find_get(net, nf_ct_zone(ct), &t);
78 if (other_exp) { 79 if (other_exp) {
79 nf_ct_unexpect_related(other_exp); 80 nf_ct_unexpect_related(other_exp);
80 nf_ct_expect_put(other_exp); 81 nf_ct_expect_put(other_exp);
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 9e81e0dfb4e..ab74cc0535e 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -28,36 +28,6 @@
28 (1 << NF_INET_POST_ROUTING) | \ 28 (1 << NF_INET_POST_ROUTING) | \
29 (1 << NF_INET_LOCAL_OUT)) 29 (1 << NF_INET_LOCAL_OUT))
30 30
31static const struct
32{
33 struct ipt_replace repl;
34 struct ipt_standard entries[3];
35 struct ipt_error term;
36} nat_initial_table __net_initdata = {
37 .repl = {
38 .name = "nat",
39 .valid_hooks = NAT_VALID_HOOKS,
40 .num_entries = 4,
41 .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
42 .hook_entry = {
43 [NF_INET_PRE_ROUTING] = 0,
44 [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard),
45 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2
46 },
47 .underflow = {
48 [NF_INET_PRE_ROUTING] = 0,
49 [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard),
50 [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2
51 },
52 },
53 .entries = {
54 IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
55 IPT_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
56 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
57 },
58 .term = IPT_ERROR_INIT, /* ERROR */
59};
60
61static const struct xt_table nat_table = { 31static const struct xt_table nat_table = {
62 .name = "nat", 32 .name = "nat",
63 .valid_hooks = NAT_VALID_HOOKS, 33 .valid_hooks = NAT_VALID_HOOKS,
@@ -186,8 +156,13 @@ static struct xt_target ipt_dnat_reg __read_mostly = {
186 156
187static int __net_init nf_nat_rule_net_init(struct net *net) 157static int __net_init nf_nat_rule_net_init(struct net *net)
188{ 158{
189 net->ipv4.nat_table = ipt_register_table(net, &nat_table, 159 struct ipt_replace *repl;
190 &nat_initial_table.repl); 160
161 repl = ipt_alloc_initial_table(&nat_table);
162 if (repl == NULL)
163 return -ENOMEM;
164 net->ipv4.nat_table = ipt_register_table(net, &nat_table, repl);
165 kfree(repl);
191 if (IS_ERR(net->ipv4.nat_table)) 166 if (IS_ERR(net->ipv4.nat_table))
192 return PTR_ERR(net->ipv4.nat_table); 167 return PTR_ERR(net->ipv4.nat_table);
193 return 0; 168 return 0;
@@ -195,7 +170,7 @@ static int __net_init nf_nat_rule_net_init(struct net *net)
195 170
196static void __net_exit nf_nat_rule_net_exit(struct net *net) 171static void __net_exit nf_nat_rule_net_exit(struct net *net)
197{ 172{
198 ipt_unregister_table(net->ipv4.nat_table); 173 ipt_unregister_table(net, net->ipv4.nat_table);
199} 174}
200 175
201static struct pernet_operations nf_nat_rule_net_ops = { 176static struct pernet_operations nf_nat_rule_net_ops = {
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index 07d61a57613..11b538deaae 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -1,4 +1,4 @@
1/* SIP extension for UDP NAT alteration. 1/* SIP extension for NAT alteration.
2 * 2 *
3 * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar> 3 * (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar>
4 * based on RR's ip_nat_ftp.c and other modules. 4 * based on RR's ip_nat_ftp.c and other modules.
@@ -15,6 +15,7 @@
15#include <linux/ip.h> 15#include <linux/ip.h>
16#include <net/ip.h> 16#include <net/ip.h>
17#include <linux/udp.h> 17#include <linux/udp.h>
18#include <linux/tcp.h>
18 19
19#include <net/netfilter/nf_nat.h> 20#include <net/netfilter/nf_nat.h>
20#include <net/netfilter/nf_nat_helper.h> 21#include <net/netfilter/nf_nat_helper.h>
@@ -29,25 +30,42 @@ MODULE_DESCRIPTION("SIP NAT helper");
29MODULE_ALIAS("ip_nat_sip"); 30MODULE_ALIAS("ip_nat_sip");
30 31
31 32
32static unsigned int mangle_packet(struct sk_buff *skb, 33static unsigned int mangle_packet(struct sk_buff *skb, unsigned int dataoff,
33 const char **dptr, unsigned int *datalen, 34 const char **dptr, unsigned int *datalen,
34 unsigned int matchoff, unsigned int matchlen, 35 unsigned int matchoff, unsigned int matchlen,
35 const char *buffer, unsigned int buflen) 36 const char *buffer, unsigned int buflen)
36{ 37{
37 enum ip_conntrack_info ctinfo; 38 enum ip_conntrack_info ctinfo;
38 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 39 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
39 40 struct tcphdr *th;
40 if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, matchoff, matchlen, 41 unsigned int baseoff;
41 buffer, buflen)) 42
42 return 0; 43 if (nf_ct_protonum(ct) == IPPROTO_TCP) {
44 th = (struct tcphdr *)(skb->data + ip_hdrlen(skb));
45 baseoff = ip_hdrlen(skb) + th->doff * 4;
46 matchoff += dataoff - baseoff;
47
48 if (!__nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
49 matchoff, matchlen,
50 buffer, buflen, false))
51 return 0;
52 } else {
53 baseoff = ip_hdrlen(skb) + sizeof(struct udphdr);
54 matchoff += dataoff - baseoff;
55
56 if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
57 matchoff, matchlen,
58 buffer, buflen))
59 return 0;
60 }
43 61
44 /* Reload data pointer and adjust datalen value */ 62 /* Reload data pointer and adjust datalen value */
45 *dptr = skb->data + ip_hdrlen(skb) + sizeof(struct udphdr); 63 *dptr = skb->data + dataoff;
46 *datalen += buflen - matchlen; 64 *datalen += buflen - matchlen;
47 return 1; 65 return 1;
48} 66}
49 67
50static int map_addr(struct sk_buff *skb, 68static int map_addr(struct sk_buff *skb, unsigned int dataoff,
51 const char **dptr, unsigned int *datalen, 69 const char **dptr, unsigned int *datalen,
52 unsigned int matchoff, unsigned int matchlen, 70 unsigned int matchoff, unsigned int matchlen,
53 union nf_inet_addr *addr, __be16 port) 71 union nf_inet_addr *addr, __be16 port)
@@ -76,11 +94,11 @@ static int map_addr(struct sk_buff *skb,
76 94
77 buflen = sprintf(buffer, "%pI4:%u", &newaddr, ntohs(newport)); 95 buflen = sprintf(buffer, "%pI4:%u", &newaddr, ntohs(newport));
78 96
79 return mangle_packet(skb, dptr, datalen, matchoff, matchlen, 97 return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
80 buffer, buflen); 98 buffer, buflen);
81} 99}
82 100
83static int map_sip_addr(struct sk_buff *skb, 101static int map_sip_addr(struct sk_buff *skb, unsigned int dataoff,
84 const char **dptr, unsigned int *datalen, 102 const char **dptr, unsigned int *datalen,
85 enum sip_header_types type) 103 enum sip_header_types type)
86{ 104{
@@ -93,16 +111,18 @@ static int map_sip_addr(struct sk_buff *skb,
93 if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, type, NULL, 111 if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, type, NULL,
94 &matchoff, &matchlen, &addr, &port) <= 0) 112 &matchoff, &matchlen, &addr, &port) <= 0)
95 return 1; 113 return 1;
96 return map_addr(skb, dptr, datalen, matchoff, matchlen, &addr, port); 114 return map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
115 &addr, port);
97} 116}
98 117
99static unsigned int ip_nat_sip(struct sk_buff *skb, 118static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
100 const char **dptr, unsigned int *datalen) 119 const char **dptr, unsigned int *datalen)
101{ 120{
102 enum ip_conntrack_info ctinfo; 121 enum ip_conntrack_info ctinfo;
103 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 122 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
104 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 123 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
105 unsigned int dataoff, matchoff, matchlen; 124 unsigned int coff, matchoff, matchlen;
125 enum sip_header_types hdr;
106 union nf_inet_addr addr; 126 union nf_inet_addr addr;
107 __be16 port; 127 __be16 port;
108 int request, in_header; 128 int request, in_header;
@@ -112,16 +132,21 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
112 if (ct_sip_parse_request(ct, *dptr, *datalen, 132 if (ct_sip_parse_request(ct, *dptr, *datalen,
113 &matchoff, &matchlen, 133 &matchoff, &matchlen,
114 &addr, &port) > 0 && 134 &addr, &port) > 0 &&
115 !map_addr(skb, dptr, datalen, matchoff, matchlen, 135 !map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
116 &addr, port)) 136 &addr, port))
117 return NF_DROP; 137 return NF_DROP;
118 request = 1; 138 request = 1;
119 } else 139 } else
120 request = 0; 140 request = 0;
121 141
142 if (nf_ct_protonum(ct) == IPPROTO_TCP)
143 hdr = SIP_HDR_VIA_TCP;
144 else
145 hdr = SIP_HDR_VIA_UDP;
146
122 /* Translate topmost Via header and parameters */ 147 /* Translate topmost Via header and parameters */
123 if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, 148 if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
124 SIP_HDR_VIA, NULL, &matchoff, &matchlen, 149 hdr, NULL, &matchoff, &matchlen,
125 &addr, &port) > 0) { 150 &addr, &port) > 0) {
126 unsigned int matchend, poff, plen, buflen, n; 151 unsigned int matchend, poff, plen, buflen, n;
127 char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; 152 char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
@@ -138,7 +163,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
138 goto next; 163 goto next;
139 } 164 }
140 165
141 if (!map_addr(skb, dptr, datalen, matchoff, matchlen, 166 if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
142 &addr, port)) 167 &addr, port))
143 return NF_DROP; 168 return NF_DROP;
144 169
@@ -153,8 +178,8 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
153 addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) { 178 addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) {
154 buflen = sprintf(buffer, "%pI4", 179 buflen = sprintf(buffer, "%pI4",
155 &ct->tuplehash[!dir].tuple.dst.u3.ip); 180 &ct->tuplehash[!dir].tuple.dst.u3.ip);
156 if (!mangle_packet(skb, dptr, datalen, poff, plen, 181 if (!mangle_packet(skb, dataoff, dptr, datalen,
157 buffer, buflen)) 182 poff, plen, buffer, buflen))
158 return NF_DROP; 183 return NF_DROP;
159 } 184 }
160 185
@@ -167,8 +192,8 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
167 addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) { 192 addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) {
168 buflen = sprintf(buffer, "%pI4", 193 buflen = sprintf(buffer, "%pI4",
169 &ct->tuplehash[!dir].tuple.src.u3.ip); 194 &ct->tuplehash[!dir].tuple.src.u3.ip);
170 if (!mangle_packet(skb, dptr, datalen, poff, plen, 195 if (!mangle_packet(skb, dataoff, dptr, datalen,
171 buffer, buflen)) 196 poff, plen, buffer, buflen))
172 return NF_DROP; 197 return NF_DROP;
173 } 198 }
174 199
@@ -181,31 +206,45 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
181 htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) { 206 htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) {
182 __be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port; 207 __be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port;
183 buflen = sprintf(buffer, "%u", ntohs(p)); 208 buflen = sprintf(buffer, "%u", ntohs(p));
184 if (!mangle_packet(skb, dptr, datalen, poff, plen, 209 if (!mangle_packet(skb, dataoff, dptr, datalen,
185 buffer, buflen)) 210 poff, plen, buffer, buflen))
186 return NF_DROP; 211 return NF_DROP;
187 } 212 }
188 } 213 }
189 214
190next: 215next:
191 /* Translate Contact headers */ 216 /* Translate Contact headers */
192 dataoff = 0; 217 coff = 0;
193 in_header = 0; 218 in_header = 0;
194 while (ct_sip_parse_header_uri(ct, *dptr, &dataoff, *datalen, 219 while (ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen,
195 SIP_HDR_CONTACT, &in_header, 220 SIP_HDR_CONTACT, &in_header,
196 &matchoff, &matchlen, 221 &matchoff, &matchlen,
197 &addr, &port) > 0) { 222 &addr, &port) > 0) {
198 if (!map_addr(skb, dptr, datalen, matchoff, matchlen, 223 if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
199 &addr, port)) 224 &addr, port))
200 return NF_DROP; 225 return NF_DROP;
201 } 226 }
202 227
203 if (!map_sip_addr(skb, dptr, datalen, SIP_HDR_FROM) || 228 if (!map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_FROM) ||
204 !map_sip_addr(skb, dptr, datalen, SIP_HDR_TO)) 229 !map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_TO))
205 return NF_DROP; 230 return NF_DROP;
231
206 return NF_ACCEPT; 232 return NF_ACCEPT;
207} 233}
208 234
235static void ip_nat_sip_seq_adjust(struct sk_buff *skb, s16 off)
236{
237 enum ip_conntrack_info ctinfo;
238 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
239 const struct tcphdr *th;
240
241 if (nf_ct_protonum(ct) != IPPROTO_TCP || off == 0)
242 return;
243
244 th = (struct tcphdr *)(skb->data + ip_hdrlen(skb));
245 nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off);
246}
247
209/* Handles expected signalling connections and media streams */ 248/* Handles expected signalling connections and media streams */
210static void ip_nat_sip_expected(struct nf_conn *ct, 249static void ip_nat_sip_expected(struct nf_conn *ct,
211 struct nf_conntrack_expect *exp) 250 struct nf_conntrack_expect *exp)
@@ -232,7 +271,7 @@ static void ip_nat_sip_expected(struct nf_conn *ct,
232 } 271 }
233} 272}
234 273
235static unsigned int ip_nat_sip_expect(struct sk_buff *skb, 274static unsigned int ip_nat_sip_expect(struct sk_buff *skb, unsigned int dataoff,
236 const char **dptr, unsigned int *datalen, 275 const char **dptr, unsigned int *datalen,
237 struct nf_conntrack_expect *exp, 276 struct nf_conntrack_expect *exp,
238 unsigned int matchoff, 277 unsigned int matchoff,
@@ -279,8 +318,8 @@ static unsigned int ip_nat_sip_expect(struct sk_buff *skb,
279 if (exp->tuple.dst.u3.ip != exp->saved_ip || 318 if (exp->tuple.dst.u3.ip != exp->saved_ip ||
280 exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) { 319 exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) {
281 buflen = sprintf(buffer, "%pI4:%u", &newip, port); 320 buflen = sprintf(buffer, "%pI4:%u", &newip, port);
282 if (!mangle_packet(skb, dptr, datalen, matchoff, matchlen, 321 if (!mangle_packet(skb, dataoff, dptr, datalen,
283 buffer, buflen)) 322 matchoff, matchlen, buffer, buflen))
284 goto err; 323 goto err;
285 } 324 }
286 return NF_ACCEPT; 325 return NF_ACCEPT;
@@ -290,7 +329,7 @@ err:
290 return NF_DROP; 329 return NF_DROP;
291} 330}
292 331
293static int mangle_content_len(struct sk_buff *skb, 332static int mangle_content_len(struct sk_buff *skb, unsigned int dataoff,
294 const char **dptr, unsigned int *datalen) 333 const char **dptr, unsigned int *datalen)
295{ 334{
296 enum ip_conntrack_info ctinfo; 335 enum ip_conntrack_info ctinfo;
@@ -312,12 +351,13 @@ static int mangle_content_len(struct sk_buff *skb,
312 return 0; 351 return 0;
313 352
314 buflen = sprintf(buffer, "%u", c_len); 353 buflen = sprintf(buffer, "%u", c_len);
315 return mangle_packet(skb, dptr, datalen, matchoff, matchlen, 354 return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
316 buffer, buflen); 355 buffer, buflen);
317} 356}
318 357
319static int mangle_sdp_packet(struct sk_buff *skb, const char **dptr, 358static int mangle_sdp_packet(struct sk_buff *skb, unsigned int dataoff,
320 unsigned int dataoff, unsigned int *datalen, 359 const char **dptr, unsigned int *datalen,
360 unsigned int sdpoff,
321 enum sdp_header_types type, 361 enum sdp_header_types type,
322 enum sdp_header_types term, 362 enum sdp_header_types term,
323 char *buffer, int buflen) 363 char *buffer, int buflen)
@@ -326,16 +366,16 @@ static int mangle_sdp_packet(struct sk_buff *skb, const char **dptr,
326 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 366 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
327 unsigned int matchlen, matchoff; 367 unsigned int matchlen, matchoff;
328 368
329 if (ct_sip_get_sdp_header(ct, *dptr, dataoff, *datalen, type, term, 369 if (ct_sip_get_sdp_header(ct, *dptr, sdpoff, *datalen, type, term,
330 &matchoff, &matchlen) <= 0) 370 &matchoff, &matchlen) <= 0)
331 return -ENOENT; 371 return -ENOENT;
332 return mangle_packet(skb, dptr, datalen, matchoff, matchlen, 372 return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
333 buffer, buflen) ? 0 : -EINVAL; 373 buffer, buflen) ? 0 : -EINVAL;
334} 374}
335 375
336static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, const char **dptr, 376static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, unsigned int dataoff,
337 unsigned int dataoff, 377 const char **dptr, unsigned int *datalen,
338 unsigned int *datalen, 378 unsigned int sdpoff,
339 enum sdp_header_types type, 379 enum sdp_header_types type,
340 enum sdp_header_types term, 380 enum sdp_header_types term,
341 const union nf_inet_addr *addr) 381 const union nf_inet_addr *addr)
@@ -344,16 +384,15 @@ static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, const char **dptr,
344 unsigned int buflen; 384 unsigned int buflen;
345 385
346 buflen = sprintf(buffer, "%pI4", &addr->ip); 386 buflen = sprintf(buffer, "%pI4", &addr->ip);
347 if (mangle_sdp_packet(skb, dptr, dataoff, datalen, type, term, 387 if (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff, type, term,
348 buffer, buflen)) 388 buffer, buflen))
349 return 0; 389 return 0;
350 390
351 return mangle_content_len(skb, dptr, datalen); 391 return mangle_content_len(skb, dataoff, dptr, datalen);
352} 392}
353 393
354static unsigned int ip_nat_sdp_port(struct sk_buff *skb, 394static unsigned int ip_nat_sdp_port(struct sk_buff *skb, unsigned int dataoff,
355 const char **dptr, 395 const char **dptr, unsigned int *datalen,
356 unsigned int *datalen,
357 unsigned int matchoff, 396 unsigned int matchoff,
358 unsigned int matchlen, 397 unsigned int matchlen,
359 u_int16_t port) 398 u_int16_t port)
@@ -362,16 +401,16 @@ static unsigned int ip_nat_sdp_port(struct sk_buff *skb,
362 unsigned int buflen; 401 unsigned int buflen;
363 402
364 buflen = sprintf(buffer, "%u", port); 403 buflen = sprintf(buffer, "%u", port);
365 if (!mangle_packet(skb, dptr, datalen, matchoff, matchlen, 404 if (!mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
366 buffer, buflen)) 405 buffer, buflen))
367 return 0; 406 return 0;
368 407
369 return mangle_content_len(skb, dptr, datalen); 408 return mangle_content_len(skb, dataoff, dptr, datalen);
370} 409}
371 410
372static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr, 411static unsigned int ip_nat_sdp_session(struct sk_buff *skb, unsigned int dataoff,
373 unsigned int dataoff, 412 const char **dptr, unsigned int *datalen,
374 unsigned int *datalen, 413 unsigned int sdpoff,
375 const union nf_inet_addr *addr) 414 const union nf_inet_addr *addr)
376{ 415{
377 char buffer[sizeof("nnn.nnn.nnn.nnn")]; 416 char buffer[sizeof("nnn.nnn.nnn.nnn")];
@@ -379,12 +418,12 @@ static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr,
379 418
380 /* Mangle session description owner and contact addresses */ 419 /* Mangle session description owner and contact addresses */
381 buflen = sprintf(buffer, "%pI4", &addr->ip); 420 buflen = sprintf(buffer, "%pI4", &addr->ip);
382 if (mangle_sdp_packet(skb, dptr, dataoff, datalen, 421 if (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff,
383 SDP_HDR_OWNER_IP4, SDP_HDR_MEDIA, 422 SDP_HDR_OWNER_IP4, SDP_HDR_MEDIA,
384 buffer, buflen)) 423 buffer, buflen))
385 return 0; 424 return 0;
386 425
387 switch (mangle_sdp_packet(skb, dptr, dataoff, datalen, 426 switch (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff,
388 SDP_HDR_CONNECTION_IP4, SDP_HDR_MEDIA, 427 SDP_HDR_CONNECTION_IP4, SDP_HDR_MEDIA,
389 buffer, buflen)) { 428 buffer, buflen)) {
390 case 0: 429 case 0:
@@ -401,14 +440,13 @@ static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr,
401 return 0; 440 return 0;
402 } 441 }
403 442
404 return mangle_content_len(skb, dptr, datalen); 443 return mangle_content_len(skb, dataoff, dptr, datalen);
405} 444}
406 445
407/* So, this packet has hit the connection tracking matching code. 446/* So, this packet has hit the connection tracking matching code.
408 Mangle it, and change the expectation to match the new version. */ 447 Mangle it, and change the expectation to match the new version. */
409static unsigned int ip_nat_sdp_media(struct sk_buff *skb, 448static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
410 const char **dptr, 449 const char **dptr, unsigned int *datalen,
411 unsigned int *datalen,
412 struct nf_conntrack_expect *rtp_exp, 450 struct nf_conntrack_expect *rtp_exp,
413 struct nf_conntrack_expect *rtcp_exp, 451 struct nf_conntrack_expect *rtcp_exp,
414 unsigned int mediaoff, 452 unsigned int mediaoff,
@@ -456,7 +494,8 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb,
456 494
457 /* Update media port. */ 495 /* Update media port. */
458 if (rtp_exp->tuple.dst.u.udp.port != rtp_exp->saved_proto.udp.port && 496 if (rtp_exp->tuple.dst.u.udp.port != rtp_exp->saved_proto.udp.port &&
459 !ip_nat_sdp_port(skb, dptr, datalen, mediaoff, medialen, port)) 497 !ip_nat_sdp_port(skb, dataoff, dptr, datalen,
498 mediaoff, medialen, port))
460 goto err2; 499 goto err2;
461 500
462 return NF_ACCEPT; 501 return NF_ACCEPT;
@@ -471,6 +510,7 @@ err1:
471static void __exit nf_nat_sip_fini(void) 510static void __exit nf_nat_sip_fini(void)
472{ 511{
473 rcu_assign_pointer(nf_nat_sip_hook, NULL); 512 rcu_assign_pointer(nf_nat_sip_hook, NULL);
513 rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, NULL);
474 rcu_assign_pointer(nf_nat_sip_expect_hook, NULL); 514 rcu_assign_pointer(nf_nat_sip_expect_hook, NULL);
475 rcu_assign_pointer(nf_nat_sdp_addr_hook, NULL); 515 rcu_assign_pointer(nf_nat_sdp_addr_hook, NULL);
476 rcu_assign_pointer(nf_nat_sdp_port_hook, NULL); 516 rcu_assign_pointer(nf_nat_sdp_port_hook, NULL);
@@ -482,12 +522,14 @@ static void __exit nf_nat_sip_fini(void)
482static int __init nf_nat_sip_init(void) 522static int __init nf_nat_sip_init(void)
483{ 523{
484 BUG_ON(nf_nat_sip_hook != NULL); 524 BUG_ON(nf_nat_sip_hook != NULL);
525 BUG_ON(nf_nat_sip_seq_adjust_hook != NULL);
485 BUG_ON(nf_nat_sip_expect_hook != NULL); 526 BUG_ON(nf_nat_sip_expect_hook != NULL);
486 BUG_ON(nf_nat_sdp_addr_hook != NULL); 527 BUG_ON(nf_nat_sdp_addr_hook != NULL);
487 BUG_ON(nf_nat_sdp_port_hook != NULL); 528 BUG_ON(nf_nat_sdp_port_hook != NULL);
488 BUG_ON(nf_nat_sdp_session_hook != NULL); 529 BUG_ON(nf_nat_sdp_session_hook != NULL);
489 BUG_ON(nf_nat_sdp_media_hook != NULL); 530 BUG_ON(nf_nat_sdp_media_hook != NULL);
490 rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip); 531 rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip);
532 rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust);
491 rcu_assign_pointer(nf_nat_sip_expect_hook, ip_nat_sip_expect); 533 rcu_assign_pointer(nf_nat_sip_expect_hook, ip_nat_sip_expect);
492 rcu_assign_pointer(nf_nat_sdp_addr_hook, ip_nat_sdp_addr); 534 rcu_assign_pointer(nf_nat_sdp_addr_hook, ip_nat_sdp_addr);
493 rcu_assign_pointer(nf_nat_sdp_port_hook, ip_nat_sdp_port); 535 rcu_assign_pointer(nf_nat_sdp_port_hook, ip_nat_sdp_port);
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index d9521f6f9ed..0b9c7ce3d6c 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1038,7 +1038,7 @@ static int snmp_parse_mangle(unsigned char *msg,
1038 unsigned int cls, con, tag, vers, pdutype; 1038 unsigned int cls, con, tag, vers, pdutype;
1039 struct asn1_ctx ctx; 1039 struct asn1_ctx ctx;
1040 struct asn1_octstr comm; 1040 struct asn1_octstr comm;
1041 struct snmp_object **obj; 1041 struct snmp_object *obj;
1042 1042
1043 if (debug > 1) 1043 if (debug > 1)
1044 hex_dump(msg, len); 1044 hex_dump(msg, len);
@@ -1148,43 +1148,34 @@ static int snmp_parse_mangle(unsigned char *msg,
1148 if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ) 1148 if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
1149 return 0; 1149 return 0;
1150 1150
1151 obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC);
1152 if (obj == NULL) {
1153 if (net_ratelimit())
1154 printk(KERN_WARNING "OOM in bsalg(%d)\n", __LINE__);
1155 return 0;
1156 }
1157
1158 while (!asn1_eoc_decode(&ctx, eoc)) { 1151 while (!asn1_eoc_decode(&ctx, eoc)) {
1159 unsigned int i; 1152 unsigned int i;
1160 1153
1161 if (!snmp_object_decode(&ctx, obj)) { 1154 if (!snmp_object_decode(&ctx, &obj)) {
1162 if (*obj) { 1155 if (obj) {
1163 kfree((*obj)->id); 1156 kfree(obj->id);
1164 kfree(*obj); 1157 kfree(obj);
1165 } 1158 }
1166 kfree(obj);
1167 return 0; 1159 return 0;
1168 } 1160 }
1169 1161
1170 if (debug > 1) { 1162 if (debug > 1) {
1171 printk(KERN_DEBUG "bsalg: object: "); 1163 printk(KERN_DEBUG "bsalg: object: ");
1172 for (i = 0; i < (*obj)->id_len; i++) { 1164 for (i = 0; i < obj->id_len; i++) {
1173 if (i > 0) 1165 if (i > 0)
1174 printk("."); 1166 printk(".");
1175 printk("%lu", (*obj)->id[i]); 1167 printk("%lu", obj->id[i]);
1176 } 1168 }
1177 printk(": type=%u\n", (*obj)->type); 1169 printk(": type=%u\n", obj->type);
1178 1170
1179 } 1171 }
1180 1172
1181 if ((*obj)->type == SNMP_IPADDR) 1173 if (obj->type == SNMP_IPADDR)
1182 mangle_address(ctx.begin, ctx.pointer - 4 , map, check); 1174 mangle_address(ctx.begin, ctx.pointer - 4 , map, check);
1183 1175
1184 kfree((*obj)->id); 1176 kfree(obj->id);
1185 kfree(*obj); 1177 kfree(obj);
1186 } 1178 }
1187 kfree(obj);
1188 1179
1189 if (!asn1_eoc_decode(&ctx, eoc)) 1180 if (!asn1_eoc_decode(&ctx, eoc))
1190 return 0; 1181 return 0;
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index f25542c48b7..242ed230737 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -127,8 +127,8 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
127 SNMP_MIB_SENTINEL 127 SNMP_MIB_SENTINEL
128}; 128};
129 129
130static struct { 130static const struct {
131 char *name; 131 const char *name;
132 int index; 132 int index;
133} icmpmibmap[] = { 133} icmpmibmap[] = {
134 { "DestUnreachs", ICMP_DEST_UNREACH }, 134 { "DestUnreachs", ICMP_DEST_UNREACH },
@@ -280,7 +280,7 @@ static void icmpmsg_put(struct seq_file *seq)
280 280
281 count = 0; 281 count = 0;
282 for (i = 0; i < ICMPMSG_MIB_MAX; i++) { 282 for (i = 0; i < ICMPMSG_MIB_MAX; i++) {
283 val = snmp_fold_field((void **) net->mib.icmpmsg_statistics, i); 283 val = snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics, i);
284 if (val) { 284 if (val) {
285 type[count] = i; 285 type[count] = i;
286 vals[count++] = val; 286 vals[count++] = val;
@@ -307,18 +307,18 @@ static void icmp_put(struct seq_file *seq)
307 for (i=0; icmpmibmap[i].name != NULL; i++) 307 for (i=0; icmpmibmap[i].name != NULL; i++)
308 seq_printf(seq, " Out%s", icmpmibmap[i].name); 308 seq_printf(seq, " Out%s", icmpmibmap[i].name);
309 seq_printf(seq, "\nIcmp: %lu %lu", 309 seq_printf(seq, "\nIcmp: %lu %lu",
310 snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_INMSGS), 310 snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INMSGS),
311 snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_INERRORS)); 311 snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS));
312 for (i=0; icmpmibmap[i].name != NULL; i++) 312 for (i=0; icmpmibmap[i].name != NULL; i++)
313 seq_printf(seq, " %lu", 313 seq_printf(seq, " %lu",
314 snmp_fold_field((void **) net->mib.icmpmsg_statistics, 314 snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
315 icmpmibmap[i].index)); 315 icmpmibmap[i].index));
316 seq_printf(seq, " %lu %lu", 316 seq_printf(seq, " %lu %lu",
317 snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS), 317 snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
318 snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS)); 318 snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
319 for (i=0; icmpmibmap[i].name != NULL; i++) 319 for (i=0; icmpmibmap[i].name != NULL; i++)
320 seq_printf(seq, " %lu", 320 seq_printf(seq, " %lu",
321 snmp_fold_field((void **) net->mib.icmpmsg_statistics, 321 snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
322 icmpmibmap[i].index | 0x100)); 322 icmpmibmap[i].index | 0x100));
323} 323}
324 324
@@ -341,7 +341,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
341 341
342 for (i = 0; snmp4_ipstats_list[i].name != NULL; i++) 342 for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
343 seq_printf(seq, " %lu", 343 seq_printf(seq, " %lu",
344 snmp_fold_field((void **)net->mib.ip_statistics, 344 snmp_fold_field((void __percpu **)net->mib.ip_statistics,
345 snmp4_ipstats_list[i].entry)); 345 snmp4_ipstats_list[i].entry));
346 346
347 icmp_put(seq); /* RFC 2011 compatibility */ 347 icmp_put(seq); /* RFC 2011 compatibility */
@@ -356,11 +356,11 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
356 /* MaxConn field is signed, RFC 2012 */ 356 /* MaxConn field is signed, RFC 2012 */
357 if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN) 357 if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN)
358 seq_printf(seq, " %ld", 358 seq_printf(seq, " %ld",
359 snmp_fold_field((void **)net->mib.tcp_statistics, 359 snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
360 snmp4_tcp_list[i].entry)); 360 snmp4_tcp_list[i].entry));
361 else 361 else
362 seq_printf(seq, " %lu", 362 seq_printf(seq, " %lu",
363 snmp_fold_field((void **)net->mib.tcp_statistics, 363 snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
364 snmp4_tcp_list[i].entry)); 364 snmp4_tcp_list[i].entry));
365 } 365 }
366 366
@@ -371,7 +371,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
371 seq_puts(seq, "\nUdp:"); 371 seq_puts(seq, "\nUdp:");
372 for (i = 0; snmp4_udp_list[i].name != NULL; i++) 372 for (i = 0; snmp4_udp_list[i].name != NULL; i++)
373 seq_printf(seq, " %lu", 373 seq_printf(seq, " %lu",
374 snmp_fold_field((void **)net->mib.udp_statistics, 374 snmp_fold_field((void __percpu **)net->mib.udp_statistics,
375 snmp4_udp_list[i].entry)); 375 snmp4_udp_list[i].entry));
376 376
377 /* the UDP and UDP-Lite MIBs are the same */ 377 /* the UDP and UDP-Lite MIBs are the same */
@@ -382,7 +382,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
382 seq_puts(seq, "\nUdpLite:"); 382 seq_puts(seq, "\nUdpLite:");
383 for (i = 0; snmp4_udp_list[i].name != NULL; i++) 383 for (i = 0; snmp4_udp_list[i].name != NULL; i++)
384 seq_printf(seq, " %lu", 384 seq_printf(seq, " %lu",
385 snmp_fold_field((void **)net->mib.udplite_statistics, 385 snmp_fold_field((void __percpu **)net->mib.udplite_statistics,
386 snmp4_udp_list[i].entry)); 386 snmp4_udp_list[i].entry));
387 387
388 seq_putc(seq, '\n'); 388 seq_putc(seq, '\n');
@@ -419,7 +419,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
419 seq_puts(seq, "\nTcpExt:"); 419 seq_puts(seq, "\nTcpExt:");
420 for (i = 0; snmp4_net_list[i].name != NULL; i++) 420 for (i = 0; snmp4_net_list[i].name != NULL; i++)
421 seq_printf(seq, " %lu", 421 seq_printf(seq, " %lu",
422 snmp_fold_field((void **)net->mib.net_statistics, 422 snmp_fold_field((void __percpu **)net->mib.net_statistics,
423 snmp4_net_list[i].entry)); 423 snmp4_net_list[i].entry));
424 424
425 seq_puts(seq, "\nIpExt:"); 425 seq_puts(seq, "\nIpExt:");
@@ -429,7 +429,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
429 seq_puts(seq, "\nIpExt:"); 429 seq_puts(seq, "\nIpExt:");
430 for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++) 430 for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++)
431 seq_printf(seq, " %lu", 431 seq_printf(seq, " %lu",
432 snmp_fold_field((void **)net->mib.ip_statistics, 432 snmp_fold_field((void __percpu **)net->mib.ip_statistics,
433 snmp4_ipextstats_list[i].entry)); 433 snmp4_ipextstats_list[i].entry));
434 434
435 seq_putc(seq, '\n'); 435 seq_putc(seq, '\n');
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d62b05d3338..04762d3bef7 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1990,8 +1990,13 @@ static int __mkroute_input(struct sk_buff *skb,
1990 if (skb->protocol != htons(ETH_P_IP)) { 1990 if (skb->protocol != htons(ETH_P_IP)) {
1991 /* Not IP (i.e. ARP). Do not create route, if it is 1991 /* Not IP (i.e. ARP). Do not create route, if it is
1992 * invalid for proxy arp. DNAT routes are always valid. 1992 * invalid for proxy arp. DNAT routes are always valid.
1993 *
1994 * Proxy arp feature have been extended to allow, ARP
1995 * replies back to the same interface, to support
1996 * Private VLAN switch technologies. See arp.c.
1993 */ 1997 */
1994 if (out_dev == in_dev) { 1998 if (out_dev == in_dev &&
1999 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1995 err = -EINVAL; 2000 err = -EINVAL;
1996 goto cleanup; 2001 goto cleanup;
1997 } 2002 }
@@ -3329,7 +3334,7 @@ static __net_initdata struct pernet_operations rt_secret_timer_ops = {
3329 3334
3330 3335
3331#ifdef CONFIG_NET_CLS_ROUTE 3336#ifdef CONFIG_NET_CLS_ROUTE
3332struct ip_rt_acct *ip_rt_acct __read_mostly; 3337struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3333#endif /* CONFIG_NET_CLS_ROUTE */ 3338#endif /* CONFIG_NET_CLS_ROUTE */
3334 3339
3335static __initdata unsigned long rhash_entries; 3340static __initdata unsigned long rhash_entries;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 66fd80ef247..5c24db4a3c9 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -358,7 +358,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
358 358
359 tcp_select_initial_window(tcp_full_space(sk), req->mss, 359 tcp_select_initial_window(tcp_full_space(sk), req->mss,
360 &req->rcv_wnd, &req->window_clamp, 360 &req->rcv_wnd, &req->window_clamp,
361 ireq->wscale_ok, &rcv_wscale); 361 ireq->wscale_ok, &rcv_wscale,
362 dst_metric(&rt->u.dst, RTAX_INITRWND));
362 363
363 ireq->rcv_wscale = rcv_wscale; 364 ireq->rcv_wscale = rcv_wscale;
364 365
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 7e3712ce399..c1bc074f61b 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -576,6 +576,20 @@ static struct ctl_table ipv4_table[] = {
576 .proc_handler = proc_dointvec 576 .proc_handler = proc_dointvec
577 }, 577 },
578 { 578 {
579 .procname = "tcp_thin_linear_timeouts",
580 .data = &sysctl_tcp_thin_linear_timeouts,
581 .maxlen = sizeof(int),
582 .mode = 0644,
583 .proc_handler = proc_dointvec
584 },
585 {
586 .procname = "tcp_thin_dupack",
587 .data = &sysctl_tcp_thin_dupack,
588 .maxlen = sizeof(int),
589 .mode = 0644,
590 .proc_handler = proc_dointvec
591 },
592 {
579 .procname = "udp_mem", 593 .procname = "udp_mem",
580 .data = &sysctl_udp_mem, 594 .data = &sysctl_udp_mem,
581 .maxlen = sizeof(sysctl_udp_mem), 595 .maxlen = sizeof(sysctl_udp_mem),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b0a26bb25e2..5901010fad5 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -536,8 +536,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
536 tp->nonagle &= ~TCP_NAGLE_PUSH; 536 tp->nonagle &= ~TCP_NAGLE_PUSH;
537} 537}
538 538
539static inline void tcp_mark_urg(struct tcp_sock *tp, int flags, 539static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
540 struct sk_buff *skb)
541{ 540{
542 if (flags & MSG_OOB) 541 if (flags & MSG_OOB)
543 tp->snd_up = tp->write_seq; 542 tp->snd_up = tp->write_seq;
@@ -546,13 +545,13 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
546static inline void tcp_push(struct sock *sk, int flags, int mss_now, 545static inline void tcp_push(struct sock *sk, int flags, int mss_now,
547 int nonagle) 546 int nonagle)
548{ 547{
549 struct tcp_sock *tp = tcp_sk(sk);
550
551 if (tcp_send_head(sk)) { 548 if (tcp_send_head(sk)) {
552 struct sk_buff *skb = tcp_write_queue_tail(sk); 549 struct tcp_sock *tp = tcp_sk(sk);
550
553 if (!(flags & MSG_MORE) || forced_push(tp)) 551 if (!(flags & MSG_MORE) || forced_push(tp))
554 tcp_mark_push(tp, skb); 552 tcp_mark_push(tp, tcp_write_queue_tail(sk));
555 tcp_mark_urg(tp, flags, skb); 553
554 tcp_mark_urg(tp, flags);
556 __tcp_push_pending_frames(sk, mss_now, 555 __tcp_push_pending_frames(sk, mss_now,
557 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle); 556 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
558 } 557 }
@@ -877,12 +876,12 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
877#define TCP_PAGE(sk) (sk->sk_sndmsg_page) 876#define TCP_PAGE(sk) (sk->sk_sndmsg_page)
878#define TCP_OFF(sk) (sk->sk_sndmsg_off) 877#define TCP_OFF(sk) (sk->sk_sndmsg_off)
879 878
880static inline int select_size(struct sock *sk) 879static inline int select_size(struct sock *sk, int sg)
881{ 880{
882 struct tcp_sock *tp = tcp_sk(sk); 881 struct tcp_sock *tp = tcp_sk(sk);
883 int tmp = tp->mss_cache; 882 int tmp = tp->mss_cache;
884 883
885 if (sk->sk_route_caps & NETIF_F_SG) { 884 if (sg) {
886 if (sk_can_gso(sk)) 885 if (sk_can_gso(sk))
887 tmp = 0; 886 tmp = 0;
888 else { 887 else {
@@ -906,7 +905,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
906 struct sk_buff *skb; 905 struct sk_buff *skb;
907 int iovlen, flags; 906 int iovlen, flags;
908 int mss_now, size_goal; 907 int mss_now, size_goal;
909 int err, copied; 908 int sg, err, copied;
910 long timeo; 909 long timeo;
911 910
912 lock_sock(sk); 911 lock_sock(sk);
@@ -934,6 +933,8 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
934 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 933 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
935 goto out_err; 934 goto out_err;
936 935
936 sg = sk->sk_route_caps & NETIF_F_SG;
937
937 while (--iovlen >= 0) { 938 while (--iovlen >= 0) {
938 int seglen = iov->iov_len; 939 int seglen = iov->iov_len;
939 unsigned char __user *from = iov->iov_base; 940 unsigned char __user *from = iov->iov_base;
@@ -959,8 +960,9 @@ new_segment:
959 if (!sk_stream_memory_free(sk)) 960 if (!sk_stream_memory_free(sk))
960 goto wait_for_sndbuf; 961 goto wait_for_sndbuf;
961 962
962 skb = sk_stream_alloc_skb(sk, select_size(sk), 963 skb = sk_stream_alloc_skb(sk,
963 sk->sk_allocation); 964 select_size(sk, sg),
965 sk->sk_allocation);
964 if (!skb) 966 if (!skb)
965 goto wait_for_memory; 967 goto wait_for_memory;
966 968
@@ -997,9 +999,7 @@ new_segment:
997 /* We can extend the last page 999 /* We can extend the last page
998 * fragment. */ 1000 * fragment. */
999 merge = 1; 1001 merge = 1;
1000 } else if (i == MAX_SKB_FRAGS || 1002 } else if (i == MAX_SKB_FRAGS || !sg) {
1001 (!i &&
1002 !(sk->sk_route_caps & NETIF_F_SG))) {
1003 /* Need to add new fragment and cannot 1003 /* Need to add new fragment and cannot
1004 * do this because interface is non-SG, 1004 * do this because interface is non-SG,
1005 * or because all the page slots are 1005 * or because all the page slots are
@@ -2229,6 +2229,20 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2229 } 2229 }
2230 break; 2230 break;
2231 2231
2232 case TCP_THIN_LINEAR_TIMEOUTS:
2233 if (val < 0 || val > 1)
2234 err = -EINVAL;
2235 else
2236 tp->thin_lto = val;
2237 break;
2238
2239 case TCP_THIN_DUPACK:
2240 if (val < 0 || val > 1)
2241 err = -EINVAL;
2242 else
2243 tp->thin_dupack = val;
2244 break;
2245
2232 case TCP_CORK: 2246 case TCP_CORK:
2233 /* When set indicates to always queue non-full frames. 2247 /* When set indicates to always queue non-full frames.
2234 * Later the user clears this option and we transmit 2248 * Later the user clears this option and we transmit
@@ -2788,10 +2802,10 @@ EXPORT_SYMBOL(tcp_gro_complete);
2788 2802
2789#ifdef CONFIG_TCP_MD5SIG 2803#ifdef CONFIG_TCP_MD5SIG
2790static unsigned long tcp_md5sig_users; 2804static unsigned long tcp_md5sig_users;
2791static struct tcp_md5sig_pool **tcp_md5sig_pool; 2805static struct tcp_md5sig_pool * __percpu *tcp_md5sig_pool;
2792static DEFINE_SPINLOCK(tcp_md5sig_pool_lock); 2806static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2793 2807
2794static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool) 2808static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
2795{ 2809{
2796 int cpu; 2810 int cpu;
2797 for_each_possible_cpu(cpu) { 2811 for_each_possible_cpu(cpu) {
@@ -2808,7 +2822,7 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
2808 2822
2809void tcp_free_md5sig_pool(void) 2823void tcp_free_md5sig_pool(void)
2810{ 2824{
2811 struct tcp_md5sig_pool **pool = NULL; 2825 struct tcp_md5sig_pool * __percpu *pool = NULL;
2812 2826
2813 spin_lock_bh(&tcp_md5sig_pool_lock); 2827 spin_lock_bh(&tcp_md5sig_pool_lock);
2814 if (--tcp_md5sig_users == 0) { 2828 if (--tcp_md5sig_users == 0) {
@@ -2822,10 +2836,11 @@ void tcp_free_md5sig_pool(void)
2822 2836
2823EXPORT_SYMBOL(tcp_free_md5sig_pool); 2837EXPORT_SYMBOL(tcp_free_md5sig_pool);
2824 2838
2825static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(struct sock *sk) 2839static struct tcp_md5sig_pool * __percpu *
2840__tcp_alloc_md5sig_pool(struct sock *sk)
2826{ 2841{
2827 int cpu; 2842 int cpu;
2828 struct tcp_md5sig_pool **pool; 2843 struct tcp_md5sig_pool * __percpu *pool;
2829 2844
2830 pool = alloc_percpu(struct tcp_md5sig_pool *); 2845 pool = alloc_percpu(struct tcp_md5sig_pool *);
2831 if (!pool) 2846 if (!pool)
@@ -2852,9 +2867,9 @@ out_free:
2852 return NULL; 2867 return NULL;
2853} 2868}
2854 2869
2855struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *sk) 2870struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
2856{ 2871{
2857 struct tcp_md5sig_pool **pool; 2872 struct tcp_md5sig_pool * __percpu *pool;
2858 int alloc = 0; 2873 int alloc = 0;
2859 2874
2860retry: 2875retry:
@@ -2873,7 +2888,9 @@ retry:
2873 2888
2874 if (alloc) { 2889 if (alloc) {
2875 /* we cannot hold spinlock here because this may sleep. */ 2890 /* we cannot hold spinlock here because this may sleep. */
2876 struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(sk); 2891 struct tcp_md5sig_pool * __percpu *p;
2892
2893 p = __tcp_alloc_md5sig_pool(sk);
2877 spin_lock_bh(&tcp_md5sig_pool_lock); 2894 spin_lock_bh(&tcp_md5sig_pool_lock);
2878 if (!p) { 2895 if (!p) {
2879 tcp_md5sig_users--; 2896 tcp_md5sig_users--;
@@ -2897,7 +2914,7 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2897 2914
2898struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu) 2915struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
2899{ 2916{
2900 struct tcp_md5sig_pool **p; 2917 struct tcp_md5sig_pool * __percpu *p;
2901 spin_lock_bh(&tcp_md5sig_pool_lock); 2918 spin_lock_bh(&tcp_md5sig_pool_lock);
2902 p = tcp_md5sig_pool; 2919 p = tcp_md5sig_pool;
2903 if (p) 2920 if (p)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3fddc69cccc..788851ca8c5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -89,6 +89,8 @@ int sysctl_tcp_frto __read_mostly = 2;
89int sysctl_tcp_frto_response __read_mostly; 89int sysctl_tcp_frto_response __read_mostly;
90int sysctl_tcp_nometrics_save __read_mostly; 90int sysctl_tcp_nometrics_save __read_mostly;
91 91
92int sysctl_tcp_thin_dupack __read_mostly;
93
92int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; 94int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
93int sysctl_tcp_abc __read_mostly; 95int sysctl_tcp_abc __read_mostly;
94 96
@@ -2447,6 +2449,16 @@ static int tcp_time_to_recover(struct sock *sk)
2447 return 1; 2449 return 1;
2448 } 2450 }
2449 2451
2452 /* If a thin stream is detected, retransmit after first
2453 * received dupack. Employ only if SACK is supported in order
2454 * to avoid possible corner-case series of spurious retransmissions
2455 * Use only if there are no unsent data.
2456 */
2457 if ((tp->thin_dupack || sysctl_tcp_thin_dupack) &&
2458 tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 &&
2459 tcp_is_sack(tp) && !tcp_send_head(sk))
2460 return 1;
2461
2450 return 0; 2462 return 0;
2451} 2463}
2452 2464
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 65b8ebfd078..c3588b4fd97 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -742,9 +742,9 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
742 * This still operates on a request_sock only, not on a big 742 * This still operates on a request_sock only, not on a big
743 * socket. 743 * socket.
744 */ 744 */
745static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, 745static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
746 struct request_sock *req, 746 struct request_sock *req,
747 struct request_values *rvp) 747 struct request_values *rvp)
748{ 748{
749 const struct inet_request_sock *ireq = inet_rsk(req); 749 const struct inet_request_sock *ireq = inet_rsk(req);
750 int err = -1; 750 int err = -1;
@@ -775,10 +775,11 @@ static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
775 return err; 775 return err;
776} 776}
777 777
778static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req, 778static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
779 struct request_values *rvp) 779 struct request_values *rvp)
780{ 780{
781 return __tcp_v4_send_synack(sk, NULL, req, rvp); 781 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
782 return tcp_v4_send_synack(sk, NULL, req, rvp);
782} 783}
783 784
784/* 785/*
@@ -1192,10 +1193,11 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
1192struct request_sock_ops tcp_request_sock_ops __read_mostly = { 1193struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1193 .family = PF_INET, 1194 .family = PF_INET,
1194 .obj_size = sizeof(struct tcp_request_sock), 1195 .obj_size = sizeof(struct tcp_request_sock),
1195 .rtx_syn_ack = tcp_v4_send_synack, 1196 .rtx_syn_ack = tcp_v4_rtx_synack,
1196 .send_ack = tcp_v4_reqsk_send_ack, 1197 .send_ack = tcp_v4_reqsk_send_ack,
1197 .destructor = tcp_v4_reqsk_destructor, 1198 .destructor = tcp_v4_reqsk_destructor,
1198 .send_reset = tcp_v4_send_reset, 1199 .send_reset = tcp_v4_send_reset,
1200 .syn_ack_timeout = tcp_syn_ack_timeout,
1199}; 1201};
1200 1202
1201#ifdef CONFIG_TCP_MD5SIG 1203#ifdef CONFIG_TCP_MD5SIG
@@ -1373,8 +1375,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1373 } 1375 }
1374 tcp_rsk(req)->snt_isn = isn; 1376 tcp_rsk(req)->snt_isn = isn;
1375 1377
1376 if (__tcp_v4_send_synack(sk, dst, req, 1378 if (tcp_v4_send_synack(sk, dst, req,
1377 (struct request_values *)&tmp_ext) || 1379 (struct request_values *)&tmp_ext) ||
1378 want_cookie) 1380 want_cookie)
1379 goto drop_and_free; 1381 goto drop_and_free;
1380 1382
@@ -1649,6 +1651,9 @@ int tcp_v4_rcv(struct sk_buff *skb)
1649 if (!sk) 1651 if (!sk)
1650 goto no_tcp_socket; 1652 goto no_tcp_socket;
1651 1653
1654 if (iph->ttl < inet_sk(sk)->min_ttl)
1655 goto discard_and_relse;
1656
1652process: 1657process:
1653 if (sk->sk_state == TCP_TIME_WAIT) 1658 if (sk->sk_state == TCP_TIME_WAIT)
1654 goto do_time_wait; 1659 goto do_time_wait;
@@ -2425,12 +2430,12 @@ static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2425 }, 2430 },
2426}; 2431};
2427 2432
2428static int tcp4_proc_init_net(struct net *net) 2433static int __net_init tcp4_proc_init_net(struct net *net)
2429{ 2434{
2430 return tcp_proc_register(net, &tcp4_seq_afinfo); 2435 return tcp_proc_register(net, &tcp4_seq_afinfo);
2431} 2436}
2432 2437
2433static void tcp4_proc_exit_net(struct net *net) 2438static void __net_exit tcp4_proc_exit_net(struct net *net)
2434{ 2439{
2435 tcp_proc_unregister(net, &tcp4_seq_afinfo); 2440 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2436} 2441}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 383ce237640..4a1605d3f90 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -183,7 +183,8 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
183 */ 183 */
184void tcp_select_initial_window(int __space, __u32 mss, 184void tcp_select_initial_window(int __space, __u32 mss,
185 __u32 *rcv_wnd, __u32 *window_clamp, 185 __u32 *rcv_wnd, __u32 *window_clamp,
186 int wscale_ok, __u8 *rcv_wscale) 186 int wscale_ok, __u8 *rcv_wscale,
187 __u32 init_rcv_wnd)
187{ 188{
188 unsigned int space = (__space < 0 ? 0 : __space); 189 unsigned int space = (__space < 0 ? 0 : __space);
189 190
@@ -232,7 +233,13 @@ void tcp_select_initial_window(int __space, __u32 mss,
232 init_cwnd = 2; 233 init_cwnd = 2;
233 else if (mss > 1460) 234 else if (mss > 1460)
234 init_cwnd = 3; 235 init_cwnd = 3;
235 if (*rcv_wnd > init_cwnd * mss) 236 /* when initializing use the value from init_rcv_wnd
237 * rather than the default from above
238 */
239 if (init_rcv_wnd &&
240 (*rcv_wnd > init_rcv_wnd * mss))
241 *rcv_wnd = init_rcv_wnd * mss;
242 else if (*rcv_wnd > init_cwnd * mss)
236 *rcv_wnd = init_cwnd * mss; 243 *rcv_wnd = init_cwnd * mss;
237 } 244 }
238 245
@@ -1794,11 +1801,6 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1794void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, 1801void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
1795 int nonagle) 1802 int nonagle)
1796{ 1803{
1797 struct sk_buff *skb = tcp_send_head(sk);
1798
1799 if (!skb)
1800 return;
1801
1802 /* If we are closed, the bytes will have to remain here. 1804 /* If we are closed, the bytes will have to remain here.
1803 * In time closedown will finish, we empty the write queue and 1805 * In time closedown will finish, we empty the write queue and
1804 * all will be happy. 1806 * all will be happy.
@@ -2422,7 +2424,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2422 &req->rcv_wnd, 2424 &req->rcv_wnd,
2423 &req->window_clamp, 2425 &req->window_clamp,
2424 ireq->wscale_ok, 2426 ireq->wscale_ok,
2425 &rcv_wscale); 2427 &rcv_wscale,
2428 dst_metric(dst, RTAX_INITRWND));
2426 ireq->rcv_wscale = rcv_wscale; 2429 ireq->rcv_wscale = rcv_wscale;
2427 } 2430 }
2428 2431
@@ -2549,7 +2552,8 @@ static void tcp_connect_init(struct sock *sk)
2549 &tp->rcv_wnd, 2552 &tp->rcv_wnd,
2550 &tp->window_clamp, 2553 &tp->window_clamp,
2551 sysctl_tcp_window_scaling, 2554 sysctl_tcp_window_scaling,
2552 &rcv_wscale); 2555 &rcv_wscale,
2556 dst_metric(dst, RTAX_INITRWND));
2553 2557
2554 tp->rx_opt.rcv_wscale = rcv_wscale; 2558 tp->rx_opt.rcv_wscale = rcv_wscale;
2555 tp->rcv_ssthresh = tp->rcv_wnd; 2559 tp->rcv_ssthresh = tp->rcv_wnd;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 8816a20c259..a17629b8912 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -29,6 +29,7 @@ int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL;
29int sysctl_tcp_retries1 __read_mostly = TCP_RETR1; 29int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
30int sysctl_tcp_retries2 __read_mostly = TCP_RETR2; 30int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
31int sysctl_tcp_orphan_retries __read_mostly; 31int sysctl_tcp_orphan_retries __read_mostly;
32int sysctl_tcp_thin_linear_timeouts __read_mostly;
32 33
33static void tcp_write_timer(unsigned long); 34static void tcp_write_timer(unsigned long);
34static void tcp_delack_timer(unsigned long); 35static void tcp_delack_timer(unsigned long);
@@ -415,7 +416,25 @@ void tcp_retransmit_timer(struct sock *sk)
415 icsk->icsk_retransmits++; 416 icsk->icsk_retransmits++;
416 417
417out_reset_timer: 418out_reset_timer:
418 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); 419 /* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
420 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
421 * might be increased if the stream oscillates between thin and thick,
422 * thus the old value might already be too high compared to the value
423 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
424 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
425 * exponential backoff behaviour to avoid continue hammering
426 * linear-timeout retransmissions into a black hole
427 */
428 if (sk->sk_state == TCP_ESTABLISHED &&
429 (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
430 tcp_stream_is_thin(tp) &&
431 icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
432 icsk->icsk_backoff = 0;
433 icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
434 } else {
435 /* Use normal (exponential) backoff */
436 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
437 }
419 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); 438 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
420 if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1)) 439 if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1))
421 __sk_dst_reset(sk); 440 __sk_dst_reset(sk);
@@ -474,6 +493,12 @@ static void tcp_synack_timer(struct sock *sk)
474 TCP_TIMEOUT_INIT, TCP_RTO_MAX); 493 TCP_TIMEOUT_INIT, TCP_RTO_MAX);
475} 494}
476 495
496void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
497{
498 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
499}
500EXPORT_SYMBOL(tcp_syn_ack_timeout);
501
477void tcp_set_keepalive(struct sock *sk, int val) 502void tcp_set_keepalive(struct sock *sk, int val)
478{ 503{
479 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) 504 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f0126fdd7e0..608a5446d05 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1117,7 +1117,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1117 struct inet_sock *inet = inet_sk(sk); 1117 struct inet_sock *inet = inet_sk(sk);
1118 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; 1118 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
1119 struct sk_buff *skb; 1119 struct sk_buff *skb;
1120 unsigned int ulen, copied; 1120 unsigned int ulen;
1121 int peeked; 1121 int peeked;
1122 int err; 1122 int err;
1123 int is_udplite = IS_UDPLITE(sk); 1123 int is_udplite = IS_UDPLITE(sk);
@@ -1138,10 +1138,9 @@ try_again:
1138 goto out; 1138 goto out;
1139 1139
1140 ulen = skb->len - sizeof(struct udphdr); 1140 ulen = skb->len - sizeof(struct udphdr);
1141 copied = len; 1141 if (len > ulen)
1142 if (copied > ulen) 1142 len = ulen;
1143 copied = ulen; 1143 else if (len < ulen)
1144 else if (copied < ulen)
1145 msg->msg_flags |= MSG_TRUNC; 1144 msg->msg_flags |= MSG_TRUNC;
1146 1145
1147 /* 1146 /*
@@ -1150,14 +1149,14 @@ try_again:
1150 * coverage checksum (UDP-Lite), do it before the copy. 1149 * coverage checksum (UDP-Lite), do it before the copy.
1151 */ 1150 */
1152 1151
1153 if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { 1152 if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
1154 if (udp_lib_checksum_complete(skb)) 1153 if (udp_lib_checksum_complete(skb))
1155 goto csum_copy_err; 1154 goto csum_copy_err;
1156 } 1155 }
1157 1156
1158 if (skb_csum_unnecessary(skb)) 1157 if (skb_csum_unnecessary(skb))
1159 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), 1158 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
1160 msg->msg_iov, copied); 1159 msg->msg_iov, len);
1161 else { 1160 else {
1162 err = skb_copy_and_csum_datagram_iovec(skb, 1161 err = skb_copy_and_csum_datagram_iovec(skb,
1163 sizeof(struct udphdr), 1162 sizeof(struct udphdr),
@@ -1186,7 +1185,7 @@ try_again:
1186 if (inet->cmsg_flags) 1185 if (inet->cmsg_flags)
1187 ip_cmsg_recv(msg, skb); 1186 ip_cmsg_recv(msg, skb);
1188 1187
1189 err = copied; 1188 err = len;
1190 if (flags & MSG_TRUNC) 1189 if (flags & MSG_TRUNC)
1191 err = ulen; 1190 err = ulen;
1192 1191
@@ -2027,12 +2026,12 @@ static struct udp_seq_afinfo udp4_seq_afinfo = {
2027 }, 2026 },
2028}; 2027};
2029 2028
2030static int udp4_proc_init_net(struct net *net) 2029static int __net_init udp4_proc_init_net(struct net *net)
2031{ 2030{
2032 return udp_proc_register(net, &udp4_seq_afinfo); 2031 return udp_proc_register(net, &udp4_seq_afinfo);
2033} 2032}
2034 2033
2035static void udp4_proc_exit_net(struct net *net) 2034static void __net_exit udp4_proc_exit_net(struct net *net)
2036{ 2035{
2037 udp_proc_unregister(net, &udp4_seq_afinfo); 2036 udp_proc_unregister(net, &udp4_seq_afinfo);
2038} 2037}
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 66f79513f4a..6610bf76369 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -81,12 +81,12 @@ static struct udp_seq_afinfo udplite4_seq_afinfo = {
81 }, 81 },
82}; 82};
83 83
84static int udplite4_proc_init_net(struct net *net) 84static int __net_init udplite4_proc_init_net(struct net *net)
85{ 85{
86 return udp_proc_register(net, &udplite4_seq_afinfo); 86 return udp_proc_register(net, &udplite4_seq_afinfo);
87} 87}
88 88
89static void udplite4_proc_exit_net(struct net *net) 89static void __net_exit udplite4_proc_exit_net(struct net *net)
90{ 90{
91 udp_proc_unregister(net, &udplite4_seq_afinfo); 91 udp_proc_unregister(net, &udplite4_seq_afinfo);
92} 92}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 143791da062..1b327f15e7e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -278,31 +278,31 @@ static void addrconf_mod_timer(struct inet6_ifaddr *ifp,
278 278
279static int snmp6_alloc_dev(struct inet6_dev *idev) 279static int snmp6_alloc_dev(struct inet6_dev *idev)
280{ 280{
281 if (snmp_mib_init((void **)idev->stats.ipv6, 281 if (snmp_mib_init((void __percpu **)idev->stats.ipv6,
282 sizeof(struct ipstats_mib)) < 0) 282 sizeof(struct ipstats_mib)) < 0)
283 goto err_ip; 283 goto err_ip;
284 if (snmp_mib_init((void **)idev->stats.icmpv6, 284 if (snmp_mib_init((void __percpu **)idev->stats.icmpv6,
285 sizeof(struct icmpv6_mib)) < 0) 285 sizeof(struct icmpv6_mib)) < 0)
286 goto err_icmp; 286 goto err_icmp;
287 if (snmp_mib_init((void **)idev->stats.icmpv6msg, 287 if (snmp_mib_init((void __percpu **)idev->stats.icmpv6msg,
288 sizeof(struct icmpv6msg_mib)) < 0) 288 sizeof(struct icmpv6msg_mib)) < 0)
289 goto err_icmpmsg; 289 goto err_icmpmsg;
290 290
291 return 0; 291 return 0;
292 292
293err_icmpmsg: 293err_icmpmsg:
294 snmp_mib_free((void **)idev->stats.icmpv6); 294 snmp_mib_free((void __percpu **)idev->stats.icmpv6);
295err_icmp: 295err_icmp:
296 snmp_mib_free((void **)idev->stats.ipv6); 296 snmp_mib_free((void __percpu **)idev->stats.ipv6);
297err_ip: 297err_ip:
298 return -ENOMEM; 298 return -ENOMEM;
299} 299}
300 300
301static void snmp6_free_dev(struct inet6_dev *idev) 301static void snmp6_free_dev(struct inet6_dev *idev)
302{ 302{
303 snmp_mib_free((void **)idev->stats.icmpv6msg); 303 snmp_mib_free((void __percpu **)idev->stats.icmpv6msg);
304 snmp_mib_free((void **)idev->stats.icmpv6); 304 snmp_mib_free((void __percpu **)idev->stats.icmpv6);
305 snmp_mib_free((void **)idev->stats.ipv6); 305 snmp_mib_free((void __percpu **)idev->stats.ipv6);
306} 306}
307 307
308/* Nobody refers to this device, we may destroy it. */ 308/* Nobody refers to this device, we may destroy it. */
@@ -2649,7 +2649,8 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2649 2649
2650 write_lock_bh(&addrconf_hash_lock); 2650 write_lock_bh(&addrconf_hash_lock);
2651 while ((ifa = *bifa) != NULL) { 2651 while ((ifa = *bifa) != NULL) {
2652 if (ifa->idev == idev) { 2652 if (ifa->idev == idev &&
2653 (how || !(ifa->flags&IFA_F_PERMANENT))) {
2653 *bifa = ifa->lst_next; 2654 *bifa = ifa->lst_next;
2654 ifa->lst_next = NULL; 2655 ifa->lst_next = NULL;
2655 addrconf_del_timer(ifa); 2656 addrconf_del_timer(ifa);
@@ -2689,18 +2690,30 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2689 write_lock_bh(&idev->lock); 2690 write_lock_bh(&idev->lock);
2690 } 2691 }
2691#endif 2692#endif
2692 while ((ifa = idev->addr_list) != NULL) { 2693 bifa = &idev->addr_list;
2693 idev->addr_list = ifa->if_next; 2694 while ((ifa = *bifa) != NULL) {
2694 ifa->if_next = NULL; 2695 if (how == 0 && (ifa->flags&IFA_F_PERMANENT)) {
2695 ifa->dead = 1; 2696 /* Retain permanent address on admin down */
2696 addrconf_del_timer(ifa); 2697 bifa = &ifa->if_next;
2697 write_unlock_bh(&idev->lock); 2698
2699 /* Restart DAD if needed when link comes back up */
2700 if ( !((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
2701 idev->cnf.accept_dad <= 0 ||
2702 (ifa->flags & IFA_F_NODAD)))
2703 ifa->flags |= IFA_F_TENTATIVE;
2704 } else {
2705 *bifa = ifa->if_next;
2706 ifa->if_next = NULL;
2698 2707
2699 __ipv6_ifa_notify(RTM_DELADDR, ifa); 2708 ifa->dead = 1;
2700 atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa); 2709 write_unlock_bh(&idev->lock);
2701 in6_ifa_put(ifa);
2702 2710
2703 write_lock_bh(&idev->lock); 2711 __ipv6_ifa_notify(RTM_DELADDR, ifa);
2712 atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
2713 in6_ifa_put(ifa);
2714
2715 write_lock_bh(&idev->lock);
2716 }
2704 } 2717 }
2705 write_unlock_bh(&idev->lock); 2718 write_unlock_bh(&idev->lock);
2706 2719
@@ -2792,14 +2805,14 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
2792 read_lock_bh(&idev->lock); 2805 read_lock_bh(&idev->lock);
2793 if (ifp->dead) 2806 if (ifp->dead)
2794 goto out; 2807 goto out;
2795 spin_lock_bh(&ifp->lock);
2796 2808
2809 spin_lock(&ifp->lock);
2797 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || 2810 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
2798 idev->cnf.accept_dad < 1 || 2811 idev->cnf.accept_dad < 1 ||
2799 !(ifp->flags&IFA_F_TENTATIVE) || 2812 !(ifp->flags&IFA_F_TENTATIVE) ||
2800 ifp->flags & IFA_F_NODAD) { 2813 ifp->flags & IFA_F_NODAD) {
2801 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); 2814 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
2802 spin_unlock_bh(&ifp->lock); 2815 spin_unlock(&ifp->lock);
2803 read_unlock_bh(&idev->lock); 2816 read_unlock_bh(&idev->lock);
2804 2817
2805 addrconf_dad_completed(ifp); 2818 addrconf_dad_completed(ifp);
@@ -2807,7 +2820,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
2807 } 2820 }
2808 2821
2809 if (!(idev->if_flags & IF_READY)) { 2822 if (!(idev->if_flags & IF_READY)) {
2810 spin_unlock_bh(&ifp->lock); 2823 spin_unlock(&ifp->lock);
2811 read_unlock_bh(&idev->lock); 2824 read_unlock_bh(&idev->lock);
2812 /* 2825 /*
2813 * If the device is not ready: 2826 * If the device is not ready:
@@ -2827,7 +2840,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
2827 ip6_ins_rt(ifp->rt); 2840 ip6_ins_rt(ifp->rt);
2828 2841
2829 addrconf_dad_kick(ifp); 2842 addrconf_dad_kick(ifp);
2830 spin_unlock_bh(&ifp->lock); 2843 spin_unlock(&ifp->lock);
2831out: 2844out:
2832 read_unlock_bh(&idev->lock); 2845 read_unlock_bh(&idev->lock);
2833} 2846}
@@ -2843,14 +2856,15 @@ static void addrconf_dad_timer(unsigned long data)
2843 read_unlock_bh(&idev->lock); 2856 read_unlock_bh(&idev->lock);
2844 goto out; 2857 goto out;
2845 } 2858 }
2846 spin_lock_bh(&ifp->lock); 2859
2860 spin_lock(&ifp->lock);
2847 if (ifp->probes == 0) { 2861 if (ifp->probes == 0) {
2848 /* 2862 /*
2849 * DAD was successful 2863 * DAD was successful
2850 */ 2864 */
2851 2865
2852 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); 2866 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
2853 spin_unlock_bh(&ifp->lock); 2867 spin_unlock(&ifp->lock);
2854 read_unlock_bh(&idev->lock); 2868 read_unlock_bh(&idev->lock);
2855 2869
2856 addrconf_dad_completed(ifp); 2870 addrconf_dad_completed(ifp);
@@ -2860,7 +2874,7 @@ static void addrconf_dad_timer(unsigned long data)
2860 2874
2861 ifp->probes--; 2875 ifp->probes--;
2862 addrconf_mod_timer(ifp, AC_DAD, ifp->idev->nd_parms->retrans_time); 2876 addrconf_mod_timer(ifp, AC_DAD, ifp->idev->nd_parms->retrans_time);
2863 spin_unlock_bh(&ifp->lock); 2877 spin_unlock(&ifp->lock);
2864 read_unlock_bh(&idev->lock); 2878 read_unlock_bh(&idev->lock);
2865 2879
2866 /* send a neighbour solicitation for our addr */ 2880 /* send a neighbour solicitation for our addr */
@@ -2908,12 +2922,12 @@ static void addrconf_dad_run(struct inet6_dev *idev) {
2908 2922
2909 read_lock_bh(&idev->lock); 2923 read_lock_bh(&idev->lock);
2910 for (ifp = idev->addr_list; ifp; ifp = ifp->if_next) { 2924 for (ifp = idev->addr_list; ifp; ifp = ifp->if_next) {
2911 spin_lock_bh(&ifp->lock); 2925 spin_lock(&ifp->lock);
2912 if (!(ifp->flags & IFA_F_TENTATIVE)) { 2926 if (!(ifp->flags & IFA_F_TENTATIVE)) {
2913 spin_unlock_bh(&ifp->lock); 2927 spin_unlock(&ifp->lock);
2914 continue; 2928 continue;
2915 } 2929 }
2916 spin_unlock_bh(&ifp->lock); 2930 spin_unlock(&ifp->lock);
2917 addrconf_dad_kick(ifp); 2931 addrconf_dad_kick(ifp);
2918 } 2932 }
2919 read_unlock_bh(&idev->lock); 2933 read_unlock_bh(&idev->lock);
@@ -3030,14 +3044,14 @@ static const struct file_operations if6_fops = {
3030 .release = seq_release_net, 3044 .release = seq_release_net,
3031}; 3045};
3032 3046
3033static int if6_proc_net_init(struct net *net) 3047static int __net_init if6_proc_net_init(struct net *net)
3034{ 3048{
3035 if (!proc_net_fops_create(net, "if_inet6", S_IRUGO, &if6_fops)) 3049 if (!proc_net_fops_create(net, "if_inet6", S_IRUGO, &if6_fops))
3036 return -ENOMEM; 3050 return -ENOMEM;
3037 return 0; 3051 return 0;
3038} 3052}
3039 3053
3040static void if6_proc_net_exit(struct net *net) 3054static void __net_exit if6_proc_net_exit(struct net *net)
3041{ 3055{
3042 proc_net_remove(net, "if_inet6"); 3056 proc_net_remove(net, "if_inet6");
3043} 3057}
@@ -3755,8 +3769,8 @@ static inline size_t inet6_if_nlmsg_size(void)
3755 ); 3769 );
3756} 3770}
3757 3771
3758static inline void __snmp6_fill_stats(u64 *stats, void **mib, int items, 3772static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib,
3759 int bytes) 3773 int items, int bytes)
3760{ 3774{
3761 int i; 3775 int i;
3762 int pad = bytes - sizeof(u64) * items; 3776 int pad = bytes - sizeof(u64) * items;
@@ -3775,10 +3789,10 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
3775{ 3789{
3776 switch(attrtype) { 3790 switch(attrtype) {
3777 case IFLA_INET6_STATS: 3791 case IFLA_INET6_STATS:
3778 __snmp6_fill_stats(stats, (void **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes); 3792 __snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes);
3779 break; 3793 break;
3780 case IFLA_INET6_ICMP6STATS: 3794 case IFLA_INET6_ICMP6STATS:
3781 __snmp6_fill_stats(stats, (void **)idev->stats.icmpv6, ICMP6_MIB_MAX, bytes); 3795 __snmp6_fill_stats(stats, (void __percpu **)idev->stats.icmpv6, ICMP6_MIB_MAX, bytes);
3782 break; 3796 break;
3783 } 3797 }
3784} 3798}
@@ -4414,8 +4428,7 @@ static void __addrconf_sysctl_unregister(struct ipv6_devconf *p)
4414 4428
4415static void addrconf_sysctl_register(struct inet6_dev *idev) 4429static void addrconf_sysctl_register(struct inet6_dev *idev)
4416{ 4430{
4417 neigh_sysctl_register(idev->dev, idev->nd_parms, NET_IPV6, 4431 neigh_sysctl_register(idev->dev, idev->nd_parms, "ipv6",
4418 NET_IPV6_NEIGH, "ipv6",
4419 &ndisc_ifinfo_sysctl_change); 4432 &ndisc_ifinfo_sysctl_change);
4420 __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name, 4433 __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
4421 idev, &idev->cnf); 4434 idev, &idev->cnf);
@@ -4430,7 +4443,7 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev)
4430 4443
4431#endif 4444#endif
4432 4445
4433static int addrconf_init_net(struct net *net) 4446static int __net_init addrconf_init_net(struct net *net)
4434{ 4447{
4435 int err; 4448 int err;
4436 struct ipv6_devconf *all, *dflt; 4449 struct ipv6_devconf *all, *dflt;
@@ -4479,7 +4492,7 @@ err_alloc_all:
4479 return err; 4492 return err;
4480} 4493}
4481 4494
4482static void addrconf_exit_net(struct net *net) 4495static void __net_exit addrconf_exit_net(struct net *net)
4483{ 4496{
4484#ifdef CONFIG_SYSCTL 4497#ifdef CONFIG_SYSCTL
4485 __addrconf_sysctl_unregister(net->ipv6.devconf_dflt); 4498 __addrconf_sysctl_unregister(net->ipv6.devconf_dflt);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 12e69d364dd..37d14e735c2 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -971,41 +971,41 @@ static void ipv6_packet_cleanup(void)
971 971
972static int __net_init ipv6_init_mibs(struct net *net) 972static int __net_init ipv6_init_mibs(struct net *net)
973{ 973{
974 if (snmp_mib_init((void **)net->mib.udp_stats_in6, 974 if (snmp_mib_init((void __percpu **)net->mib.udp_stats_in6,
975 sizeof (struct udp_mib)) < 0) 975 sizeof (struct udp_mib)) < 0)
976 return -ENOMEM; 976 return -ENOMEM;
977 if (snmp_mib_init((void **)net->mib.udplite_stats_in6, 977 if (snmp_mib_init((void __percpu **)net->mib.udplite_stats_in6,
978 sizeof (struct udp_mib)) < 0) 978 sizeof (struct udp_mib)) < 0)
979 goto err_udplite_mib; 979 goto err_udplite_mib;
980 if (snmp_mib_init((void **)net->mib.ipv6_statistics, 980 if (snmp_mib_init((void __percpu **)net->mib.ipv6_statistics,
981 sizeof(struct ipstats_mib)) < 0) 981 sizeof(struct ipstats_mib)) < 0)
982 goto err_ip_mib; 982 goto err_ip_mib;
983 if (snmp_mib_init((void **)net->mib.icmpv6_statistics, 983 if (snmp_mib_init((void __percpu **)net->mib.icmpv6_statistics,
984 sizeof(struct icmpv6_mib)) < 0) 984 sizeof(struct icmpv6_mib)) < 0)
985 goto err_icmp_mib; 985 goto err_icmp_mib;
986 if (snmp_mib_init((void **)net->mib.icmpv6msg_statistics, 986 if (snmp_mib_init((void __percpu **)net->mib.icmpv6msg_statistics,
987 sizeof(struct icmpv6msg_mib)) < 0) 987 sizeof(struct icmpv6msg_mib)) < 0)
988 goto err_icmpmsg_mib; 988 goto err_icmpmsg_mib;
989 return 0; 989 return 0;
990 990
991err_icmpmsg_mib: 991err_icmpmsg_mib:
992 snmp_mib_free((void **)net->mib.icmpv6_statistics); 992 snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics);
993err_icmp_mib: 993err_icmp_mib:
994 snmp_mib_free((void **)net->mib.ipv6_statistics); 994 snmp_mib_free((void __percpu **)net->mib.ipv6_statistics);
995err_ip_mib: 995err_ip_mib:
996 snmp_mib_free((void **)net->mib.udplite_stats_in6); 996 snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6);
997err_udplite_mib: 997err_udplite_mib:
998 snmp_mib_free((void **)net->mib.udp_stats_in6); 998 snmp_mib_free((void __percpu **)net->mib.udp_stats_in6);
999 return -ENOMEM; 999 return -ENOMEM;
1000} 1000}
1001 1001
1002static void __net_exit ipv6_cleanup_mibs(struct net *net) 1002static void ipv6_cleanup_mibs(struct net *net)
1003{ 1003{
1004 snmp_mib_free((void **)net->mib.udp_stats_in6); 1004 snmp_mib_free((void __percpu **)net->mib.udp_stats_in6);
1005 snmp_mib_free((void **)net->mib.udplite_stats_in6); 1005 snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6);
1006 snmp_mib_free((void **)net->mib.ipv6_statistics); 1006 snmp_mib_free((void __percpu **)net->mib.ipv6_statistics);
1007 snmp_mib_free((void **)net->mib.icmpv6_statistics); 1007 snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics);
1008 snmp_mib_free((void **)net->mib.icmpv6msg_statistics); 1008 snmp_mib_free((void __percpu **)net->mib.icmpv6msg_statistics);
1009} 1009}
1010 1010
1011static int __net_init inet6_net_init(struct net *net) 1011static int __net_init inet6_net_init(struct net *net)
@@ -1042,7 +1042,7 @@ out:
1042#endif 1042#endif
1043} 1043}
1044 1044
1045static void inet6_net_exit(struct net *net) 1045static void __net_exit inet6_net_exit(struct net *net)
1046{ 1046{
1047#ifdef CONFIG_PROC_FS 1047#ifdef CONFIG_PROC_FS
1048 udp6_proc_exit(net); 1048 udp6_proc_exit(net);
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index c2f300c314b..5ac89025f9d 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -614,7 +614,7 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
614 type != ICMPV6_PKT_TOOBIG) 614 type != ICMPV6_PKT_TOOBIG)
615 return; 615 return;
616 616
617 x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6); 617 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6);
618 if (!x) 618 if (!x)
619 return; 619 return;
620 620
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index f1c74c8ef9d..c4f6ca32fa7 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -538,7 +538,7 @@ static const struct file_operations ac6_seq_fops = {
538 .release = seq_release_net, 538 .release = seq_release_net,
539}; 539};
540 540
541int ac6_proc_init(struct net *net) 541int __net_init ac6_proc_init(struct net *net)
542{ 542{
543 if (!proc_net_fops_create(net, "anycast6", S_IRUGO, &ac6_seq_fops)) 543 if (!proc_net_fops_create(net, "anycast6", S_IRUGO, &ac6_seq_fops))
544 return -ENOMEM; 544 return -ENOMEM;
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 668a46b655e..ee9b93bdd6a 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -365,7 +365,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
365 type != ICMPV6_PKT_TOOBIG) 365 type != ICMPV6_PKT_TOOBIG)
366 return; 366 return;
367 367
368 x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6); 368 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6);
369 if (!x) 369 if (!x)
370 return; 370 return;
371 printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n", 371 printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n",
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 4bac362b133..074f2c084f9 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -481,7 +481,7 @@ looped_back:
481 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), 481 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
482 IPSTATS_MIB_INHDRERRORS); 482 IPSTATS_MIB_INHDRERRORS);
483 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 483 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
484 0, skb->dev); 484 0);
485 kfree_skb(skb); 485 kfree_skb(skb);
486 return -1; 486 return -1;
487 } 487 }
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index b7aa7c64cc4..551882b9dfd 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -262,7 +262,7 @@ static struct fib_rules_ops fib6_rules_ops_template = {
262 .fro_net = &init_net, 262 .fro_net = &init_net,
263}; 263};
264 264
265static int fib6_rules_net_init(struct net *net) 265static int __net_init fib6_rules_net_init(struct net *net)
266{ 266{
267 struct fib_rules_ops *ops; 267 struct fib_rules_ops *ops;
268 int err = -ENOMEM; 268 int err = -ENOMEM;
@@ -291,7 +291,7 @@ out_fib6_rules_ops:
291 goto out; 291 goto out;
292} 292}
293 293
294static void fib6_rules_net_exit(struct net *net) 294static void __net_exit fib6_rules_net_exit(struct net *net)
295{ 295{
296 fib_rules_unregister(net->ipv6.fib6_rules_ops); 296 fib_rules_unregister(net->ipv6.fib6_rules_ops);
297} 297}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 4ae661bc367..eb9abe24bdf 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -67,11 +67,6 @@
67#include <asm/uaccess.h> 67#include <asm/uaccess.h>
68#include <asm/system.h> 68#include <asm/system.h>
69 69
70DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
71EXPORT_SYMBOL(icmpv6_statistics);
72DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics) __read_mostly;
73EXPORT_SYMBOL(icmpv6msg_statistics);
74
75/* 70/*
76 * The ICMP socket(s). This is the most convenient way to flow control 71 * The ICMP socket(s). This is the most convenient way to flow control
77 * our ICMP output as well as maintain a clean interface throughout 72 * our ICMP output as well as maintain a clean interface throughout
@@ -119,7 +114,7 @@ static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
119 */ 114 */
120void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos) 115void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
121{ 116{
122 icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev); 117 icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos);
123 kfree_skb(skb); 118 kfree_skb(skb);
124} 119}
125 120
@@ -305,8 +300,7 @@ static inline void mip6_addr_swap(struct sk_buff *skb) {}
305/* 300/*
306 * Send an ICMP message in response to a packet in error 301 * Send an ICMP message in response to a packet in error
307 */ 302 */
308void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, 303void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
309 struct net_device *dev)
310{ 304{
311 struct net *net = dev_net(skb->dev); 305 struct net *net = dev_net(skb->dev);
312 struct inet6_dev *idev = NULL; 306 struct inet6_dev *idev = NULL;
@@ -951,7 +945,7 @@ ctl_table ipv6_icmp_table_template[] = {
951 { }, 945 { },
952}; 946};
953 947
954struct ctl_table *ipv6_icmp_sysctl_init(struct net *net) 948struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
955{ 949{
956 struct ctl_table *table; 950 struct ctl_table *table;
957 951
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 0e93ca56eb6..2f9847924fa 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -93,29 +93,20 @@ static __u32 rt_sernum;
93 93
94static void fib6_gc_timer_cb(unsigned long arg); 94static void fib6_gc_timer_cb(unsigned long arg);
95 95
96static struct fib6_walker_t fib6_walker_list = { 96static LIST_HEAD(fib6_walkers);
97 .prev = &fib6_walker_list, 97#define FOR_WALKERS(w) list_for_each_entry(w, &fib6_walkers, lh)
98 .next = &fib6_walker_list,
99};
100
101#define FOR_WALKERS(w) for ((w)=fib6_walker_list.next; (w) != &fib6_walker_list; (w)=(w)->next)
102 98
103static inline void fib6_walker_link(struct fib6_walker_t *w) 99static inline void fib6_walker_link(struct fib6_walker_t *w)
104{ 100{
105 write_lock_bh(&fib6_walker_lock); 101 write_lock_bh(&fib6_walker_lock);
106 w->next = fib6_walker_list.next; 102 list_add(&w->lh, &fib6_walkers);
107 w->prev = &fib6_walker_list;
108 w->next->prev = w;
109 w->prev->next = w;
110 write_unlock_bh(&fib6_walker_lock); 103 write_unlock_bh(&fib6_walker_lock);
111} 104}
112 105
113static inline void fib6_walker_unlink(struct fib6_walker_t *w) 106static inline void fib6_walker_unlink(struct fib6_walker_t *w)
114{ 107{
115 write_lock_bh(&fib6_walker_lock); 108 write_lock_bh(&fib6_walker_lock);
116 w->next->prev = w->prev; 109 list_del(&w->lh);
117 w->prev->next = w->next;
118 w->prev = w->next = w;
119 write_unlock_bh(&fib6_walker_lock); 110 write_unlock_bh(&fib6_walker_lock);
120} 111}
121static __inline__ u32 fib6_new_sernum(void) 112static __inline__ u32 fib6_new_sernum(void)
@@ -239,7 +230,7 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
239 return NULL; 230 return NULL;
240} 231}
241 232
242static void fib6_tables_init(struct net *net) 233static void __net_init fib6_tables_init(struct net *net)
243{ 234{
244 fib6_link_table(net, net->ipv6.fib6_main_tbl); 235 fib6_link_table(net, net->ipv6.fib6_main_tbl);
245 fib6_link_table(net, net->ipv6.fib6_local_tbl); 236 fib6_link_table(net, net->ipv6.fib6_local_tbl);
@@ -262,7 +253,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl,
262 return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl, flags); 253 return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl, flags);
263} 254}
264 255
265static void fib6_tables_init(struct net *net) 256static void __net_init fib6_tables_init(struct net *net)
266{ 257{
267 fib6_link_table(net, net->ipv6.fib6_main_tbl); 258 fib6_link_table(net, net->ipv6.fib6_main_tbl);
268} 259}
@@ -319,12 +310,26 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
319 w->root = &table->tb6_root; 310 w->root = &table->tb6_root;
320 311
321 if (cb->args[4] == 0) { 312 if (cb->args[4] == 0) {
313 w->count = 0;
314 w->skip = 0;
315
322 read_lock_bh(&table->tb6_lock); 316 read_lock_bh(&table->tb6_lock);
323 res = fib6_walk(w); 317 res = fib6_walk(w);
324 read_unlock_bh(&table->tb6_lock); 318 read_unlock_bh(&table->tb6_lock);
325 if (res > 0) 319 if (res > 0) {
326 cb->args[4] = 1; 320 cb->args[4] = 1;
321 cb->args[5] = w->root->fn_sernum;
322 }
327 } else { 323 } else {
324 if (cb->args[5] != w->root->fn_sernum) {
325 /* Begin at the root if the tree changed */
326 cb->args[5] = w->root->fn_sernum;
327 w->state = FWS_INIT;
328 w->node = w->root;
329 w->skip = w->count;
330 } else
331 w->skip = 0;
332
328 read_lock_bh(&table->tb6_lock); 333 read_lock_bh(&table->tb6_lock);
329 res = fib6_walk_continue(w); 334 res = fib6_walk_continue(w);
330 read_unlock_bh(&table->tb6_lock); 335 read_unlock_bh(&table->tb6_lock);
@@ -1250,9 +1255,18 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
1250 w->leaf = fn->leaf; 1255 w->leaf = fn->leaf;
1251 case FWS_C: 1256 case FWS_C:
1252 if (w->leaf && fn->fn_flags&RTN_RTINFO) { 1257 if (w->leaf && fn->fn_flags&RTN_RTINFO) {
1253 int err = w->func(w); 1258 int err;
1259
1260 if (w->count < w->skip) {
1261 w->count++;
1262 continue;
1263 }
1264
1265 err = w->func(w);
1254 if (err) 1266 if (err)
1255 return err; 1267 return err;
1268
1269 w->count++;
1256 continue; 1270 continue;
1257 } 1271 }
1258 w->state = FWS_U; 1272 w->state = FWS_U;
@@ -1346,6 +1360,8 @@ static void fib6_clean_tree(struct net *net, struct fib6_node *root,
1346 c.w.root = root; 1360 c.w.root = root;
1347 c.w.func = fib6_clean_node; 1361 c.w.func = fib6_clean_node;
1348 c.w.prune = prune; 1362 c.w.prune = prune;
1363 c.w.count = 0;
1364 c.w.skip = 0;
1349 c.func = func; 1365 c.func = func;
1350 c.arg = arg; 1366 c.arg = arg;
1351 c.net = net; 1367 c.net = net;
@@ -1469,7 +1485,7 @@ static void fib6_gc_timer_cb(unsigned long arg)
1469 fib6_run_gc(0, (struct net *)arg); 1485 fib6_run_gc(0, (struct net *)arg);
1470} 1486}
1471 1487
1472static int fib6_net_init(struct net *net) 1488static int __net_init fib6_net_init(struct net *net)
1473{ 1489{
1474 setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net); 1490 setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net);
1475 1491
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 6e7bffa2205..e41eba8aacf 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -154,7 +154,7 @@ static void ip6_fl_gc(unsigned long dummy)
154 write_unlock(&ip6_fl_lock); 154 write_unlock(&ip6_fl_lock);
155} 155}
156 156
157static void ip6_fl_purge(struct net *net) 157static void __net_exit ip6_fl_purge(struct net *net)
158{ 158{
159 int i; 159 int i;
160 160
@@ -735,7 +735,7 @@ static const struct file_operations ip6fl_seq_fops = {
735 .release = seq_release_net, 735 .release = seq_release_net,
736}; 736};
737 737
738static int ip6_flowlabel_proc_init(struct net *net) 738static int __net_init ip6_flowlabel_proc_init(struct net *net)
739{ 739{
740 if (!proc_net_fops_create(net, "ip6_flowlabel", 740 if (!proc_net_fops_create(net, "ip6_flowlabel",
741 S_IRUGO, &ip6fl_seq_fops)) 741 S_IRUGO, &ip6fl_seq_fops))
@@ -743,7 +743,7 @@ static int ip6_flowlabel_proc_init(struct net *net)
743 return 0; 743 return 0;
744} 744}
745 745
746static void ip6_flowlabel_proc_fini(struct net *net) 746static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
747{ 747{
748 proc_net_remove(net, "ip6_flowlabel"); 748 proc_net_remove(net, "ip6_flowlabel");
749} 749}
@@ -754,11 +754,10 @@ static inline int ip6_flowlabel_proc_init(struct net *net)
754} 754}
755static inline void ip6_flowlabel_proc_fini(struct net *net) 755static inline void ip6_flowlabel_proc_fini(struct net *net)
756{ 756{
757 return ;
758} 757}
759#endif 758#endif
760 759
761static inline void ip6_flowlabel_net_exit(struct net *net) 760static void __net_exit ip6_flowlabel_net_exit(struct net *net)
762{ 761{
763 ip6_fl_purge(net); 762 ip6_fl_purge(net);
764 ip6_flowlabel_proc_fini(net); 763 ip6_flowlabel_proc_fini(net);
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 237e2dba6e9..e28f9203dec 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -216,8 +216,7 @@ resubmit:
216 IP6_INC_STATS_BH(net, idev, 216 IP6_INC_STATS_BH(net, idev,
217 IPSTATS_MIB_INUNKNOWNPROTOS); 217 IPSTATS_MIB_INUNKNOWNPROTOS);
218 icmpv6_send(skb, ICMPV6_PARAMPROB, 218 icmpv6_send(skb, ICMPV6_PARAMPROB,
219 ICMPV6_UNK_NEXTHDR, nhoff, 219 ICMPV6_UNK_NEXTHDR, nhoff);
220 skb->dev);
221 } 220 }
222 } else 221 } else
223 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS); 222 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index eb6d0972863..1a5fe9ad194 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -267,7 +267,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
267 if (net_ratelimit()) 267 if (net_ratelimit())
268 printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n"); 268 printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
269 skb->dev = dst->dev; 269 skb->dev = dst->dev;
270 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 270 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
271 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); 271 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
272 kfree_skb(skb); 272 kfree_skb(skb);
273 return -EMSGSIZE; 273 return -EMSGSIZE;
@@ -441,8 +441,7 @@ int ip6_forward(struct sk_buff *skb)
441 if (hdr->hop_limit <= 1) { 441 if (hdr->hop_limit <= 1) {
442 /* Force OUTPUT device used as source address */ 442 /* Force OUTPUT device used as source address */
443 skb->dev = dst->dev; 443 skb->dev = dst->dev;
444 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 444 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
445 0, skb->dev);
446 IP6_INC_STATS_BH(net, 445 IP6_INC_STATS_BH(net,
447 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); 446 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
448 447
@@ -504,7 +503,7 @@ int ip6_forward(struct sk_buff *skb)
504 goto error; 503 goto error;
505 if (addrtype & IPV6_ADDR_LINKLOCAL) { 504 if (addrtype & IPV6_ADDR_LINKLOCAL) {
506 icmpv6_send(skb, ICMPV6_DEST_UNREACH, 505 icmpv6_send(skb, ICMPV6_DEST_UNREACH,
507 ICMPV6_NOT_NEIGHBOUR, 0, skb->dev); 506 ICMPV6_NOT_NEIGHBOUR, 0);
508 goto error; 507 goto error;
509 } 508 }
510 } 509 }
@@ -512,7 +511,7 @@ int ip6_forward(struct sk_buff *skb)
512 if (skb->len > dst_mtu(dst)) { 511 if (skb->len > dst_mtu(dst)) {
513 /* Again, force OUTPUT device used as source address */ 512 /* Again, force OUTPUT device used as source address */
514 skb->dev = dst->dev; 513 skb->dev = dst->dev;
515 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev); 514 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst));
516 IP6_INC_STATS_BH(net, 515 IP6_INC_STATS_BH(net,
517 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS); 516 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
518 IP6_INC_STATS_BH(net, 517 IP6_INC_STATS_BH(net,
@@ -627,7 +626,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
627 */ 626 */
628 if (!skb->local_df) { 627 if (!skb->local_df) {
629 skb->dev = skb_dst(skb)->dev; 628 skb->dev = skb_dst(skb)->dev;
630 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 629 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
631 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), 630 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
632 IPSTATS_MIB_FRAGFAILS); 631 IPSTATS_MIB_FRAGFAILS);
633 kfree_skb(skb); 632 kfree_skb(skb);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index d453d07b0df..138980eec21 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -74,7 +74,6 @@ MODULE_LICENSE("GPL");
74 (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \ 74 (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \
75 (HASH_SIZE - 1)) 75 (HASH_SIZE - 1))
76 76
77static void ip6_fb_tnl_dev_init(struct net_device *dev);
78static void ip6_tnl_dev_init(struct net_device *dev); 77static void ip6_tnl_dev_init(struct net_device *dev);
79static void ip6_tnl_dev_setup(struct net_device *dev); 78static void ip6_tnl_dev_setup(struct net_device *dev);
80 79
@@ -623,7 +622,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
623 if (rt && rt->rt6i_dev) 622 if (rt && rt->rt6i_dev)
624 skb2->dev = rt->rt6i_dev; 623 skb2->dev = rt->rt6i_dev;
625 624
626 icmpv6_send(skb2, rel_type, rel_code, rel_info, skb2->dev); 625 icmpv6_send(skb2, rel_type, rel_code, rel_info);
627 626
628 if (rt) 627 if (rt)
629 dst_release(&rt->u.dst); 628 dst_release(&rt->u.dst);
@@ -1015,7 +1014,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1015 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset]; 1014 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
1016 if (tel->encap_limit == 0) { 1015 if (tel->encap_limit == 0) {
1017 icmpv6_send(skb, ICMPV6_PARAMPROB, 1016 icmpv6_send(skb, ICMPV6_PARAMPROB,
1018 ICMPV6_HDR_FIELD, offset + 2, skb->dev); 1017 ICMPV6_HDR_FIELD, offset + 2);
1019 return -1; 1018 return -1;
1020 } 1019 }
1021 encap_limit = tel->encap_limit - 1; 1020 encap_limit = tel->encap_limit - 1;
@@ -1034,7 +1033,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1034 err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu); 1033 err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu);
1035 if (err != 0) { 1034 if (err != 0) {
1036 if (err == -EMSGSIZE) 1035 if (err == -EMSGSIZE)
1037 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); 1036 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1038 return -1; 1037 return -1;
1039 } 1038 }
1040 1039
@@ -1364,7 +1363,7 @@ static void ip6_tnl_dev_init(struct net_device *dev)
1364 * Return: 0 1363 * Return: 0
1365 **/ 1364 **/
1366 1365
1367static void ip6_fb_tnl_dev_init(struct net_device *dev) 1366static void __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1368{ 1367{
1369 struct ip6_tnl *t = netdev_priv(dev); 1368 struct ip6_tnl *t = netdev_priv(dev);
1370 struct net *net = dev_net(dev); 1369 struct net *net = dev_net(dev);
@@ -1388,7 +1387,7 @@ static struct xfrm6_tunnel ip6ip6_handler = {
1388 .priority = 1, 1387 .priority = 1,
1389}; 1388};
1390 1389
1391static void ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n) 1390static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
1392{ 1391{
1393 int h; 1392 int h;
1394 struct ip6_tnl *t; 1393 struct ip6_tnl *t;
@@ -1407,7 +1406,7 @@ static void ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
1407 unregister_netdevice_many(&list); 1406 unregister_netdevice_many(&list);
1408} 1407}
1409 1408
1410static int ip6_tnl_init_net(struct net *net) 1409static int __net_init ip6_tnl_init_net(struct net *net)
1411{ 1410{
1412 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 1411 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1413 int err; 1412 int err;
@@ -1436,7 +1435,7 @@ err_alloc_dev:
1436 return err; 1435 return err;
1437} 1436}
1438 1437
1439static void ip6_tnl_exit_net(struct net *net) 1438static void __net_exit ip6_tnl_exit_net(struct net *net)
1440{ 1439{
1441 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 1440 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1442 1441
@@ -1462,27 +1461,29 @@ static int __init ip6_tunnel_init(void)
1462{ 1461{
1463 int err; 1462 int err;
1464 1463
1465 if (xfrm6_tunnel_register(&ip4ip6_handler, AF_INET)) { 1464 err = register_pernet_device(&ip6_tnl_net_ops);
1465 if (err < 0)
1466 goto out_pernet;
1467
1468 err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
1469 if (err < 0) {
1466 printk(KERN_ERR "ip6_tunnel init: can't register ip4ip6\n"); 1470 printk(KERN_ERR "ip6_tunnel init: can't register ip4ip6\n");
1467 err = -EAGAIN; 1471 goto out_ip4ip6;
1468 goto out;
1469 } 1472 }
1470 1473
1471 if (xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6)) { 1474 err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
1475 if (err < 0) {
1472 printk(KERN_ERR "ip6_tunnel init: can't register ip6ip6\n"); 1476 printk(KERN_ERR "ip6_tunnel init: can't register ip6ip6\n");
1473 err = -EAGAIN; 1477 goto out_ip6ip6;
1474 goto unreg_ip4ip6;
1475 } 1478 }
1476 1479
1477 err = register_pernet_device(&ip6_tnl_net_ops);
1478 if (err < 0)
1479 goto err_pernet;
1480 return 0; 1480 return 0;
1481err_pernet: 1481
1482 xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6); 1482out_ip6ip6:
1483unreg_ip4ip6:
1484 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET); 1483 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
1485out: 1484out_ip4ip6:
1485 unregister_pernet_device(&ip6_tnl_net_ops);
1486out_pernet:
1486 return err; 1487 return err;
1487} 1488}
1488 1489
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 002e6eef912..85cccd6ed0b 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -53,6 +53,7 @@
53static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 53static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
54 u8 type, u8 code, int offset, __be32 info) 54 u8 type, u8 code, int offset, __be32 info)
55{ 55{
56 struct net *net = dev_net(skb->dev);
56 __be32 spi; 57 __be32 spi;
57 struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; 58 struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
58 struct ip_comp_hdr *ipcomph = 59 struct ip_comp_hdr *ipcomph =
@@ -63,7 +64,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
63 return; 64 return;
64 65
65 spi = htonl(ntohs(ipcomph->cpi)); 66 spi = htonl(ntohs(ipcomph->cpi));
66 x = xfrm_state_lookup(&init_net, (xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6); 67 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6);
67 if (!x) 68 if (!x)
68 return; 69 return;
69 70
@@ -74,14 +75,15 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
74 75
75static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x) 76static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
76{ 77{
78 struct net *net = xs_net(x);
77 struct xfrm_state *t = NULL; 79 struct xfrm_state *t = NULL;
78 80
79 t = xfrm_state_alloc(&init_net); 81 t = xfrm_state_alloc(net);
80 if (!t) 82 if (!t)
81 goto out; 83 goto out;
82 84
83 t->id.proto = IPPROTO_IPV6; 85 t->id.proto = IPPROTO_IPV6;
84 t->id.spi = xfrm6_tunnel_alloc_spi((xfrm_address_t *)&x->props.saddr); 86 t->id.spi = xfrm6_tunnel_alloc_spi(net, (xfrm_address_t *)&x->props.saddr);
85 if (!t->id.spi) 87 if (!t->id.spi)
86 goto error; 88 goto error;
87 89
@@ -90,6 +92,7 @@ static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
90 t->props.family = AF_INET6; 92 t->props.family = AF_INET6;
91 t->props.mode = x->props.mode; 93 t->props.mode = x->props.mode;
92 memcpy(t->props.saddr.a6, x->props.saddr.a6, sizeof(struct in6_addr)); 94 memcpy(t->props.saddr.a6, x->props.saddr.a6, sizeof(struct in6_addr));
95 memcpy(&t->mark, &x->mark, sizeof(t->mark));
93 96
94 if (xfrm_init_state(t)) 97 if (xfrm_init_state(t))
95 goto error; 98 goto error;
@@ -108,13 +111,15 @@ error:
108 111
109static int ipcomp6_tunnel_attach(struct xfrm_state *x) 112static int ipcomp6_tunnel_attach(struct xfrm_state *x)
110{ 113{
114 struct net *net = xs_net(x);
111 int err = 0; 115 int err = 0;
112 struct xfrm_state *t = NULL; 116 struct xfrm_state *t = NULL;
113 __be32 spi; 117 __be32 spi;
118 u32 mark = x->mark.m & x->mark.v;
114 119
115 spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&x->props.saddr); 120 spi = xfrm6_tunnel_spi_lookup(net, (xfrm_address_t *)&x->props.saddr);
116 if (spi) 121 if (spi)
117 t = xfrm_state_lookup(&init_net, (xfrm_address_t *)&x->id.daddr, 122 t = xfrm_state_lookup(net, mark, (xfrm_address_t *)&x->id.daddr,
118 spi, IPPROTO_IPV6, AF_INET6); 123 spi, IPPROTO_IPV6, AF_INET6);
119 if (!t) { 124 if (!t) {
120 t = ipcomp6_tunnel_create(x); 125 t = ipcomp6_tunnel_create(x);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 1f9c44442e6..bcd97191596 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -793,10 +793,10 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
793 } 793 }
794 spin_unlock_bh(&im->mca_lock); 794 spin_unlock_bh(&im->mca_lock);
795 795
796 write_lock_bh(&idev->mc_lock); 796 spin_lock_bh(&idev->mc_lock);
797 pmc->next = idev->mc_tomb; 797 pmc->next = idev->mc_tomb;
798 idev->mc_tomb = pmc; 798 idev->mc_tomb = pmc;
799 write_unlock_bh(&idev->mc_lock); 799 spin_unlock_bh(&idev->mc_lock);
800} 800}
801 801
802static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca) 802static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca)
@@ -804,7 +804,7 @@ static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca)
804 struct ifmcaddr6 *pmc, *pmc_prev; 804 struct ifmcaddr6 *pmc, *pmc_prev;
805 struct ip6_sf_list *psf, *psf_next; 805 struct ip6_sf_list *psf, *psf_next;
806 806
807 write_lock_bh(&idev->mc_lock); 807 spin_lock_bh(&idev->mc_lock);
808 pmc_prev = NULL; 808 pmc_prev = NULL;
809 for (pmc=idev->mc_tomb; pmc; pmc=pmc->next) { 809 for (pmc=idev->mc_tomb; pmc; pmc=pmc->next) {
810 if (ipv6_addr_equal(&pmc->mca_addr, pmca)) 810 if (ipv6_addr_equal(&pmc->mca_addr, pmca))
@@ -817,7 +817,8 @@ static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca)
817 else 817 else
818 idev->mc_tomb = pmc->next; 818 idev->mc_tomb = pmc->next;
819 } 819 }
820 write_unlock_bh(&idev->mc_lock); 820 spin_unlock_bh(&idev->mc_lock);
821
821 if (pmc) { 822 if (pmc) {
822 for (psf=pmc->mca_tomb; psf; psf=psf_next) { 823 for (psf=pmc->mca_tomb; psf; psf=psf_next) {
823 psf_next = psf->sf_next; 824 psf_next = psf->sf_next;
@@ -832,10 +833,10 @@ static void mld_clear_delrec(struct inet6_dev *idev)
832{ 833{
833 struct ifmcaddr6 *pmc, *nextpmc; 834 struct ifmcaddr6 *pmc, *nextpmc;
834 835
835 write_lock_bh(&idev->mc_lock); 836 spin_lock_bh(&idev->mc_lock);
836 pmc = idev->mc_tomb; 837 pmc = idev->mc_tomb;
837 idev->mc_tomb = NULL; 838 idev->mc_tomb = NULL;
838 write_unlock_bh(&idev->mc_lock); 839 spin_unlock_bh(&idev->mc_lock);
839 840
840 for (; pmc; pmc = nextpmc) { 841 for (; pmc; pmc = nextpmc) {
841 nextpmc = pmc->next; 842 nextpmc = pmc->next;
@@ -1696,7 +1697,7 @@ static void mld_send_cr(struct inet6_dev *idev)
1696 int type, dtype; 1697 int type, dtype;
1697 1698
1698 read_lock_bh(&idev->lock); 1699 read_lock_bh(&idev->lock);
1699 write_lock_bh(&idev->mc_lock); 1700 spin_lock(&idev->mc_lock);
1700 1701
1701 /* deleted MCA's */ 1702 /* deleted MCA's */
1702 pmc_prev = NULL; 1703 pmc_prev = NULL;
@@ -1730,7 +1731,7 @@ static void mld_send_cr(struct inet6_dev *idev)
1730 } else 1731 } else
1731 pmc_prev = pmc; 1732 pmc_prev = pmc;
1732 } 1733 }
1733 write_unlock_bh(&idev->mc_lock); 1734 spin_unlock(&idev->mc_lock);
1734 1735
1735 /* change recs */ 1736 /* change recs */
1736 for (pmc=idev->mc_list; pmc; pmc=pmc->next) { 1737 for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
@@ -2311,7 +2312,7 @@ void ipv6_mc_up(struct inet6_dev *idev)
2311void ipv6_mc_init_dev(struct inet6_dev *idev) 2312void ipv6_mc_init_dev(struct inet6_dev *idev)
2312{ 2313{
2313 write_lock_bh(&idev->lock); 2314 write_lock_bh(&idev->lock);
2314 rwlock_init(&idev->mc_lock); 2315 spin_lock_init(&idev->mc_lock);
2315 idev->mc_gq_running = 0; 2316 idev->mc_gq_running = 0;
2316 setup_timer(&idev->mc_gq_timer, mld_gq_timer_expire, 2317 setup_timer(&idev->mc_gq_timer, mld_gq_timer_expire,
2317 (unsigned long)idev); 2318 (unsigned long)idev);
@@ -2646,7 +2647,7 @@ static const struct file_operations igmp6_mcf_seq_fops = {
2646 .release = seq_release_net, 2647 .release = seq_release_net,
2647}; 2648};
2648 2649
2649static int igmp6_proc_init(struct net *net) 2650static int __net_init igmp6_proc_init(struct net *net)
2650{ 2651{
2651 int err; 2652 int err;
2652 2653
@@ -2666,23 +2667,22 @@ out_proc_net_igmp6:
2666 goto out; 2667 goto out;
2667} 2668}
2668 2669
2669static void igmp6_proc_exit(struct net *net) 2670static void __net_exit igmp6_proc_exit(struct net *net)
2670{ 2671{
2671 proc_net_remove(net, "mcfilter6"); 2672 proc_net_remove(net, "mcfilter6");
2672 proc_net_remove(net, "igmp6"); 2673 proc_net_remove(net, "igmp6");
2673} 2674}
2674#else 2675#else
2675static int igmp6_proc_init(struct net *net) 2676static inline int igmp6_proc_init(struct net *net)
2676{ 2677{
2677 return 0; 2678 return 0;
2678} 2679}
2679static void igmp6_proc_exit(struct net *net) 2680static inline void igmp6_proc_exit(struct net *net)
2680{ 2681{
2681 ;
2682} 2682}
2683#endif 2683#endif
2684 2684
2685static int igmp6_net_init(struct net *net) 2685static int __net_init igmp6_net_init(struct net *net)
2686{ 2686{
2687 int err; 2687 int err;
2688 2688
@@ -2708,7 +2708,7 @@ out_sock_create:
2708 goto out; 2708 goto out;
2709} 2709}
2710 2710
2711static void igmp6_net_exit(struct net *net) 2711static void __net_exit igmp6_net_exit(struct net *net)
2712{ 2712{
2713 inet_ctl_sock_destroy(net->ipv6.igmp_sk); 2713 inet_ctl_sock_destroy(net->ipv6.igmp_sk);
2714 igmp6_proc_exit(net); 2714 igmp6_proc_exit(net);
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index f797e8c6f3b..2794b600283 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -56,7 +56,7 @@ static inline void *mip6_padn(__u8 *data, __u8 padlen)
56 56
57static inline void mip6_param_prob(struct sk_buff *skb, u8 code, int pos) 57static inline void mip6_param_prob(struct sk_buff *skb, u8 code, int pos)
58{ 58{
59 icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev); 59 icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos);
60} 60}
61 61
62static int mip6_mh_len(int type) 62static int mip6_mh_len(int type)
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index c4585279809..8bcc4b7db3b 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1772,7 +1772,7 @@ int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, void __user *bu
1772 1772
1773#endif 1773#endif
1774 1774
1775static int ndisc_net_init(struct net *net) 1775static int __net_init ndisc_net_init(struct net *net)
1776{ 1776{
1777 struct ipv6_pinfo *np; 1777 struct ipv6_pinfo *np;
1778 struct sock *sk; 1778 struct sock *sk;
@@ -1797,7 +1797,7 @@ static int ndisc_net_init(struct net *net)
1797 return 0; 1797 return 0;
1798} 1798}
1799 1799
1800static void ndisc_net_exit(struct net *net) 1800static void __net_exit ndisc_net_exit(struct net *net)
1801{ 1801{
1802 inet_ctl_sock_destroy(net->ipv6.ndisc_sk); 1802 inet_ctl_sock_destroy(net->ipv6.ndisc_sk);
1803} 1803}
@@ -1820,8 +1820,7 @@ int __init ndisc_init(void)
1820 neigh_table_init(&nd_tbl); 1820 neigh_table_init(&nd_tbl);
1821 1821
1822#ifdef CONFIG_SYSCTL 1822#ifdef CONFIG_SYSCTL
1823 err = neigh_sysctl_register(NULL, &nd_tbl.parms, NET_IPV6, 1823 err = neigh_sysctl_register(NULL, &nd_tbl.parms, "ipv6",
1824 NET_IPV6_NEIGH, "ipv6",
1825 &ndisc_ifinfo_sysctl_change); 1824 &ndisc_ifinfo_sysctl_change);
1826 if (err) 1825 if (err)
1827 goto out_unregister_pernet; 1826 goto out_unregister_pernet;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 8a7e0f52e17..f7042869198 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -29,6 +29,7 @@
29#include <linux/netfilter_ipv6/ip6_tables.h> 29#include <linux/netfilter_ipv6/ip6_tables.h>
30#include <linux/netfilter/x_tables.h> 30#include <linux/netfilter/x_tables.h>
31#include <net/netfilter/nf_log.h> 31#include <net/netfilter/nf_log.h>
32#include "../../netfilter/xt_repldata.h"
32 33
33MODULE_LICENSE("GPL"); 34MODULE_LICENSE("GPL");
34MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 35MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -67,6 +68,12 @@ do { \
67#define inline 68#define inline
68#endif 69#endif
69 70
71void *ip6t_alloc_initial_table(const struct xt_table *info)
72{
73 return xt_alloc_initial_table(ip6t, IP6T);
74}
75EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
76
70/* 77/*
71 We keep a set of rules for each CPU, so we can avoid write-locking 78 We keep a set of rules for each CPU, so we can avoid write-locking
72 them in the softirq when updating the counters and therefore 79 them in the softirq when updating the counters and therefore
@@ -201,7 +208,7 @@ ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
201 208
202/* Performance critical - called for every packet */ 209/* Performance critical - called for every packet */
203static inline bool 210static inline bool
204do_match(struct ip6t_entry_match *m, const struct sk_buff *skb, 211do_match(const struct ip6t_entry_match *m, const struct sk_buff *skb,
205 struct xt_match_param *par) 212 struct xt_match_param *par)
206{ 213{
207 par->match = m->u.kernel.match; 214 par->match = m->u.kernel.match;
@@ -215,7 +222,7 @@ do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
215} 222}
216 223
217static inline struct ip6t_entry * 224static inline struct ip6t_entry *
218get_entry(void *base, unsigned int offset) 225get_entry(const void *base, unsigned int offset)
219{ 226{
220 return (struct ip6t_entry *)(base + offset); 227 return (struct ip6t_entry *)(base + offset);
221} 228}
@@ -229,6 +236,12 @@ static inline bool unconditional(const struct ip6t_ip6 *ipv6)
229 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0; 236 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
230} 237}
231 238
239static inline const struct ip6t_entry_target *
240ip6t_get_target_c(const struct ip6t_entry *e)
241{
242 return ip6t_get_target((struct ip6t_entry *)e);
243}
244
232#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 245#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
233 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) 246 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
234/* This cries for unification! */ 247/* This cries for unification! */
@@ -264,11 +277,11 @@ static struct nf_loginfo trace_loginfo = {
264 277
265/* Mildly perf critical (only if packet tracing is on) */ 278/* Mildly perf critical (only if packet tracing is on) */
266static inline int 279static inline int
267get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e, 280get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
268 const char *hookname, const char **chainname, 281 const char *hookname, const char **chainname,
269 const char **comment, unsigned int *rulenum) 282 const char **comment, unsigned int *rulenum)
270{ 283{
271 struct ip6t_standard_target *t = (void *)ip6t_get_target(s); 284 const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);
272 285
273 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) { 286 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
274 /* Head of user chain: ERROR target with chainname */ 287 /* Head of user chain: ERROR target with chainname */
@@ -294,17 +307,18 @@ get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
294 return 0; 307 return 0;
295} 308}
296 309
297static void trace_packet(struct sk_buff *skb, 310static void trace_packet(const struct sk_buff *skb,
298 unsigned int hook, 311 unsigned int hook,
299 const struct net_device *in, 312 const struct net_device *in,
300 const struct net_device *out, 313 const struct net_device *out,
301 const char *tablename, 314 const char *tablename,
302 struct xt_table_info *private, 315 const struct xt_table_info *private,
303 struct ip6t_entry *e) 316 const struct ip6t_entry *e)
304{ 317{
305 void *table_base; 318 const void *table_base;
306 const struct ip6t_entry *root; 319 const struct ip6t_entry *root;
307 const char *hookname, *chainname, *comment; 320 const char *hookname, *chainname, *comment;
321 const struct ip6t_entry *iter;
308 unsigned int rulenum = 0; 322 unsigned int rulenum = 0;
309 323
310 table_base = private->entries[smp_processor_id()]; 324 table_base = private->entries[smp_processor_id()];
@@ -313,10 +327,10 @@ static void trace_packet(struct sk_buff *skb,
313 hookname = chainname = hooknames[hook]; 327 hookname = chainname = hooknames[hook];
314 comment = comments[NF_IP6_TRACE_COMMENT_RULE]; 328 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
315 329
316 IP6T_ENTRY_ITERATE(root, 330 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
317 private->size - private->hook_entry[hook], 331 if (get_chainname_rulenum(iter, e, hookname,
318 get_chainname_rulenum, 332 &chainname, &comment, &rulenum) != 0)
319 e, hookname, &chainname, &comment, &rulenum); 333 break;
320 334
321 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo, 335 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
322 "TRACE: %s:%s:%s:%u ", 336 "TRACE: %s:%s:%s:%u ",
@@ -345,9 +359,9 @@ ip6t_do_table(struct sk_buff *skb,
345 /* Initializing verdict to NF_DROP keeps gcc happy. */ 359 /* Initializing verdict to NF_DROP keeps gcc happy. */
346 unsigned int verdict = NF_DROP; 360 unsigned int verdict = NF_DROP;
347 const char *indev, *outdev; 361 const char *indev, *outdev;
348 void *table_base; 362 const void *table_base;
349 struct ip6t_entry *e, *back; 363 struct ip6t_entry *e, *back;
350 struct xt_table_info *private; 364 const struct xt_table_info *private;
351 struct xt_match_param mtpar; 365 struct xt_match_param mtpar;
352 struct xt_target_param tgpar; 366 struct xt_target_param tgpar;
353 367
@@ -378,22 +392,27 @@ ip6t_do_table(struct sk_buff *skb,
378 back = get_entry(table_base, private->underflow[hook]); 392 back = get_entry(table_base, private->underflow[hook]);
379 393
380 do { 394 do {
381 struct ip6t_entry_target *t; 395 const struct ip6t_entry_target *t;
396 const struct xt_entry_match *ematch;
382 397
383 IP_NF_ASSERT(e); 398 IP_NF_ASSERT(e);
384 IP_NF_ASSERT(back); 399 IP_NF_ASSERT(back);
385 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6, 400 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
386 &mtpar.thoff, &mtpar.fragoff, &hotdrop) || 401 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
387 IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) { 402 no_match:
388 e = ip6t_next_entry(e); 403 e = ip6t_next_entry(e);
389 continue; 404 continue;
390 } 405 }
391 406
407 xt_ematch_foreach(ematch, e)
408 if (do_match(ematch, skb, &mtpar) != 0)
409 goto no_match;
410
392 ADD_COUNTER(e->counters, 411 ADD_COUNTER(e->counters,
393 ntohs(ipv6_hdr(skb)->payload_len) + 412 ntohs(ipv6_hdr(skb)->payload_len) +
394 sizeof(struct ipv6hdr), 1); 413 sizeof(struct ipv6hdr), 1);
395 414
396 t = ip6t_get_target(e); 415 t = ip6t_get_target_c(e);
397 IP_NF_ASSERT(t->u.kernel.target); 416 IP_NF_ASSERT(t->u.kernel.target);
398 417
399#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 418#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
@@ -475,7 +494,7 @@ ip6t_do_table(struct sk_buff *skb,
475/* Figures out from what hook each rule can be called: returns 0 if 494/* Figures out from what hook each rule can be called: returns 0 if
476 there are loops. Puts hook bitmask in comefrom. */ 495 there are loops. Puts hook bitmask in comefrom. */
477static int 496static int
478mark_source_chains(struct xt_table_info *newinfo, 497mark_source_chains(const struct xt_table_info *newinfo,
479 unsigned int valid_hooks, void *entry0) 498 unsigned int valid_hooks, void *entry0)
480{ 499{
481 unsigned int hook; 500 unsigned int hook;
@@ -493,8 +512,8 @@ mark_source_chains(struct xt_table_info *newinfo,
493 e->counters.pcnt = pos; 512 e->counters.pcnt = pos;
494 513
495 for (;;) { 514 for (;;) {
496 struct ip6t_standard_target *t 515 const struct ip6t_standard_target *t
497 = (void *)ip6t_get_target(e); 516 = (void *)ip6t_get_target_c(e);
498 int visited = e->comefrom & (1 << hook); 517 int visited = e->comefrom & (1 << hook);
499 518
500 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { 519 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
@@ -584,27 +603,23 @@ mark_source_chains(struct xt_table_info *newinfo,
584 return 1; 603 return 1;
585} 604}
586 605
587static int 606static void cleanup_match(struct ip6t_entry_match *m, struct net *net)
588cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
589{ 607{
590 struct xt_mtdtor_param par; 608 struct xt_mtdtor_param par;
591 609
592 if (i && (*i)-- == 0) 610 par.net = net;
593 return 1;
594
595 par.match = m->u.kernel.match; 611 par.match = m->u.kernel.match;
596 par.matchinfo = m->data; 612 par.matchinfo = m->data;
597 par.family = NFPROTO_IPV6; 613 par.family = NFPROTO_IPV6;
598 if (par.match->destroy != NULL) 614 if (par.match->destroy != NULL)
599 par.match->destroy(&par); 615 par.match->destroy(&par);
600 module_put(par.match->me); 616 module_put(par.match->me);
601 return 0;
602} 617}
603 618
604static int 619static int
605check_entry(struct ip6t_entry *e, const char *name) 620check_entry(const struct ip6t_entry *e, const char *name)
606{ 621{
607 struct ip6t_entry_target *t; 622 const struct ip6t_entry_target *t;
608 623
609 if (!ip6_checkentry(&e->ipv6)) { 624 if (!ip6_checkentry(&e->ipv6)) {
610 duprintf("ip_tables: ip check failed %p %s.\n", e, name); 625 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
@@ -615,15 +630,14 @@ check_entry(struct ip6t_entry *e, const char *name)
615 e->next_offset) 630 e->next_offset)
616 return -EINVAL; 631 return -EINVAL;
617 632
618 t = ip6t_get_target(e); 633 t = ip6t_get_target_c(e);
619 if (e->target_offset + t->u.target_size > e->next_offset) 634 if (e->target_offset + t->u.target_size > e->next_offset)
620 return -EINVAL; 635 return -EINVAL;
621 636
622 return 0; 637 return 0;
623} 638}
624 639
625static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par, 640static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
626 unsigned int *i)
627{ 641{
628 const struct ip6t_ip6 *ipv6 = par->entryinfo; 642 const struct ip6t_ip6 *ipv6 = par->entryinfo;
629 int ret; 643 int ret;
@@ -638,13 +652,11 @@ static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
638 par.match->name); 652 par.match->name);
639 return ret; 653 return ret;
640 } 654 }
641 ++*i;
642 return 0; 655 return 0;
643} 656}
644 657
645static int 658static int
646find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par, 659find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
647 unsigned int *i)
648{ 660{
649 struct xt_match *match; 661 struct xt_match *match;
650 int ret; 662 int ret;
@@ -658,7 +670,7 @@ find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
658 } 670 }
659 m->u.kernel.match = match; 671 m->u.kernel.match = match;
660 672
661 ret = check_match(m, par, i); 673 ret = check_match(m, par);
662 if (ret) 674 if (ret)
663 goto err; 675 goto err;
664 676
@@ -668,10 +680,11 @@ err:
668 return ret; 680 return ret;
669} 681}
670 682
671static int check_target(struct ip6t_entry *e, const char *name) 683static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
672{ 684{
673 struct ip6t_entry_target *t = ip6t_get_target(e); 685 struct ip6t_entry_target *t = ip6t_get_target(e);
674 struct xt_tgchk_param par = { 686 struct xt_tgchk_param par = {
687 .net = net,
675 .table = name, 688 .table = name,
676 .entryinfo = e, 689 .entryinfo = e,
677 .target = t->u.kernel.target, 690 .target = t->u.kernel.target,
@@ -693,27 +706,32 @@ static int check_target(struct ip6t_entry *e, const char *name)
693} 706}
694 707
695static int 708static int
696find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size, 709find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
697 unsigned int *i) 710 unsigned int size)
698{ 711{
699 struct ip6t_entry_target *t; 712 struct ip6t_entry_target *t;
700 struct xt_target *target; 713 struct xt_target *target;
701 int ret; 714 int ret;
702 unsigned int j; 715 unsigned int j;
703 struct xt_mtchk_param mtpar; 716 struct xt_mtchk_param mtpar;
717 struct xt_entry_match *ematch;
704 718
705 ret = check_entry(e, name); 719 ret = check_entry(e, name);
706 if (ret) 720 if (ret)
707 return ret; 721 return ret;
708 722
709 j = 0; 723 j = 0;
724 mtpar.net = net;
710 mtpar.table = name; 725 mtpar.table = name;
711 mtpar.entryinfo = &e->ipv6; 726 mtpar.entryinfo = &e->ipv6;
712 mtpar.hook_mask = e->comefrom; 727 mtpar.hook_mask = e->comefrom;
713 mtpar.family = NFPROTO_IPV6; 728 mtpar.family = NFPROTO_IPV6;
714 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j); 729 xt_ematch_foreach(ematch, e) {
715 if (ret != 0) 730 ret = find_check_match(ematch, &mtpar);
716 goto cleanup_matches; 731 if (ret != 0)
732 goto cleanup_matches;
733 ++j;
734 }
717 735
718 t = ip6t_get_target(e); 736 t = ip6t_get_target(e);
719 target = try_then_request_module(xt_find_target(AF_INET6, 737 target = try_then_request_module(xt_find_target(AF_INET6,
@@ -727,27 +745,29 @@ find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
727 } 745 }
728 t->u.kernel.target = target; 746 t->u.kernel.target = target;
729 747
730 ret = check_target(e, name); 748 ret = check_target(e, net, name);
731 if (ret) 749 if (ret)
732 goto err; 750 goto err;
733
734 (*i)++;
735 return 0; 751 return 0;
736 err: 752 err:
737 module_put(t->u.kernel.target->me); 753 module_put(t->u.kernel.target->me);
738 cleanup_matches: 754 cleanup_matches:
739 IP6T_MATCH_ITERATE(e, cleanup_match, &j); 755 xt_ematch_foreach(ematch, e) {
756 if (j-- == 0)
757 break;
758 cleanup_match(ematch, net);
759 }
740 return ret; 760 return ret;
741} 761}
742 762
743static bool check_underflow(struct ip6t_entry *e) 763static bool check_underflow(const struct ip6t_entry *e)
744{ 764{
745 const struct ip6t_entry_target *t; 765 const struct ip6t_entry_target *t;
746 unsigned int verdict; 766 unsigned int verdict;
747 767
748 if (!unconditional(&e->ipv6)) 768 if (!unconditional(&e->ipv6))
749 return false; 769 return false;
750 t = ip6t_get_target(e); 770 t = ip6t_get_target_c(e);
751 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) 771 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
752 return false; 772 return false;
753 verdict = ((struct ip6t_standard_target *)t)->verdict; 773 verdict = ((struct ip6t_standard_target *)t)->verdict;
@@ -758,12 +778,11 @@ static bool check_underflow(struct ip6t_entry *e)
758static int 778static int
759check_entry_size_and_hooks(struct ip6t_entry *e, 779check_entry_size_and_hooks(struct ip6t_entry *e,
760 struct xt_table_info *newinfo, 780 struct xt_table_info *newinfo,
761 unsigned char *base, 781 const unsigned char *base,
762 unsigned char *limit, 782 const unsigned char *limit,
763 const unsigned int *hook_entries, 783 const unsigned int *hook_entries,
764 const unsigned int *underflows, 784 const unsigned int *underflows,
765 unsigned int valid_hooks, 785 unsigned int valid_hooks)
766 unsigned int *i)
767{ 786{
768 unsigned int h; 787 unsigned int h;
769 788
@@ -800,50 +819,41 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
800 /* Clear counters and comefrom */ 819 /* Clear counters and comefrom */
801 e->counters = ((struct xt_counters) { 0, 0 }); 820 e->counters = ((struct xt_counters) { 0, 0 });
802 e->comefrom = 0; 821 e->comefrom = 0;
803
804 (*i)++;
805 return 0; 822 return 0;
806} 823}
807 824
808static int 825static void cleanup_entry(struct ip6t_entry *e, struct net *net)
809cleanup_entry(struct ip6t_entry *e, unsigned int *i)
810{ 826{
811 struct xt_tgdtor_param par; 827 struct xt_tgdtor_param par;
812 struct ip6t_entry_target *t; 828 struct ip6t_entry_target *t;
813 829 struct xt_entry_match *ematch;
814 if (i && (*i)-- == 0)
815 return 1;
816 830
817 /* Cleanup all matches */ 831 /* Cleanup all matches */
818 IP6T_MATCH_ITERATE(e, cleanup_match, NULL); 832 xt_ematch_foreach(ematch, e)
833 cleanup_match(ematch, net);
819 t = ip6t_get_target(e); 834 t = ip6t_get_target(e);
820 835
836 par.net = net;
821 par.target = t->u.kernel.target; 837 par.target = t->u.kernel.target;
822 par.targinfo = t->data; 838 par.targinfo = t->data;
823 par.family = NFPROTO_IPV6; 839 par.family = NFPROTO_IPV6;
824 if (par.target->destroy != NULL) 840 if (par.target->destroy != NULL)
825 par.target->destroy(&par); 841 par.target->destroy(&par);
826 module_put(par.target->me); 842 module_put(par.target->me);
827 return 0;
828} 843}
829 844
830/* Checks and translates the user-supplied table segment (held in 845/* Checks and translates the user-supplied table segment (held in
831 newinfo) */ 846 newinfo) */
832static int 847static int
833translate_table(const char *name, 848translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
834 unsigned int valid_hooks, 849 const struct ip6t_replace *repl)
835 struct xt_table_info *newinfo,
836 void *entry0,
837 unsigned int size,
838 unsigned int number,
839 const unsigned int *hook_entries,
840 const unsigned int *underflows)
841{ 850{
851 struct ip6t_entry *iter;
842 unsigned int i; 852 unsigned int i;
843 int ret; 853 int ret = 0;
844 854
845 newinfo->size = size; 855 newinfo->size = repl->size;
846 newinfo->number = number; 856 newinfo->number = repl->num_entries;
847 857
848 /* Init all hooks to impossible value. */ 858 /* Init all hooks to impossible value. */
849 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 859 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
@@ -854,49 +864,56 @@ translate_table(const char *name,
854 duprintf("translate_table: size %u\n", newinfo->size); 864 duprintf("translate_table: size %u\n", newinfo->size);
855 i = 0; 865 i = 0;
856 /* Walk through entries, checking offsets. */ 866 /* Walk through entries, checking offsets. */
857 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size, 867 xt_entry_foreach(iter, entry0, newinfo->size) {
858 check_entry_size_and_hooks, 868 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
859 newinfo, 869 entry0 + repl->size, repl->hook_entry, repl->underflow,
860 entry0, 870 repl->valid_hooks);
861 entry0 + size, 871 if (ret != 0)
862 hook_entries, underflows, valid_hooks, &i); 872 return ret;
863 if (ret != 0) 873 ++i;
864 return ret; 874 }
865 875
866 if (i != number) { 876 if (i != repl->num_entries) {
867 duprintf("translate_table: %u not %u entries\n", 877 duprintf("translate_table: %u not %u entries\n",
868 i, number); 878 i, repl->num_entries);
869 return -EINVAL; 879 return -EINVAL;
870 } 880 }
871 881
872 /* Check hooks all assigned */ 882 /* Check hooks all assigned */
873 for (i = 0; i < NF_INET_NUMHOOKS; i++) { 883 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
874 /* Only hooks which are valid */ 884 /* Only hooks which are valid */
875 if (!(valid_hooks & (1 << i))) 885 if (!(repl->valid_hooks & (1 << i)))
876 continue; 886 continue;
877 if (newinfo->hook_entry[i] == 0xFFFFFFFF) { 887 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
878 duprintf("Invalid hook entry %u %u\n", 888 duprintf("Invalid hook entry %u %u\n",
879 i, hook_entries[i]); 889 i, repl->hook_entry[i]);
880 return -EINVAL; 890 return -EINVAL;
881 } 891 }
882 if (newinfo->underflow[i] == 0xFFFFFFFF) { 892 if (newinfo->underflow[i] == 0xFFFFFFFF) {
883 duprintf("Invalid underflow %u %u\n", 893 duprintf("Invalid underflow %u %u\n",
884 i, underflows[i]); 894 i, repl->underflow[i]);
885 return -EINVAL; 895 return -EINVAL;
886 } 896 }
887 } 897 }
888 898
889 if (!mark_source_chains(newinfo, valid_hooks, entry0)) 899 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
890 return -ELOOP; 900 return -ELOOP;
891 901
892 /* Finally, each sanity check must pass */ 902 /* Finally, each sanity check must pass */
893 i = 0; 903 i = 0;
894 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size, 904 xt_entry_foreach(iter, entry0, newinfo->size) {
895 find_check_entry, name, size, &i); 905 ret = find_check_entry(iter, net, repl->name, repl->size);
906 if (ret != 0)
907 break;
908 ++i;
909 }
896 910
897 if (ret != 0) { 911 if (ret != 0) {
898 IP6T_ENTRY_ITERATE(entry0, newinfo->size, 912 xt_entry_foreach(iter, entry0, newinfo->size) {
899 cleanup_entry, &i); 913 if (i-- == 0)
914 break;
915 cleanup_entry(iter, net);
916 }
900 return ret; 917 return ret;
901 } 918 }
902 919
@@ -909,33 +926,11 @@ translate_table(const char *name,
909 return ret; 926 return ret;
910} 927}
911 928
912/* Gets counters. */
913static inline int
914add_entry_to_counter(const struct ip6t_entry *e,
915 struct xt_counters total[],
916 unsigned int *i)
917{
918 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
919
920 (*i)++;
921 return 0;
922}
923
924static inline int
925set_entry_to_counter(const struct ip6t_entry *e,
926 struct ip6t_counters total[],
927 unsigned int *i)
928{
929 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
930
931 (*i)++;
932 return 0;
933}
934
935static void 929static void
936get_counters(const struct xt_table_info *t, 930get_counters(const struct xt_table_info *t,
937 struct xt_counters counters[]) 931 struct xt_counters counters[])
938{ 932{
933 struct ip6t_entry *iter;
939 unsigned int cpu; 934 unsigned int cpu;
940 unsigned int i; 935 unsigned int i;
941 unsigned int curcpu; 936 unsigned int curcpu;
@@ -951,32 +946,32 @@ get_counters(const struct xt_table_info *t,
951 curcpu = smp_processor_id(); 946 curcpu = smp_processor_id();
952 947
953 i = 0; 948 i = 0;
954 IP6T_ENTRY_ITERATE(t->entries[curcpu], 949 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
955 t->size, 950 SET_COUNTER(counters[i], iter->counters.bcnt,
956 set_entry_to_counter, 951 iter->counters.pcnt);
957 counters, 952 ++i;
958 &i); 953 }
959 954
960 for_each_possible_cpu(cpu) { 955 for_each_possible_cpu(cpu) {
961 if (cpu == curcpu) 956 if (cpu == curcpu)
962 continue; 957 continue;
963 i = 0; 958 i = 0;
964 xt_info_wrlock(cpu); 959 xt_info_wrlock(cpu);
965 IP6T_ENTRY_ITERATE(t->entries[cpu], 960 xt_entry_foreach(iter, t->entries[cpu], t->size) {
966 t->size, 961 ADD_COUNTER(counters[i], iter->counters.bcnt,
967 add_entry_to_counter, 962 iter->counters.pcnt);
968 counters, 963 ++i;
969 &i); 964 }
970 xt_info_wrunlock(cpu); 965 xt_info_wrunlock(cpu);
971 } 966 }
972 local_bh_enable(); 967 local_bh_enable();
973} 968}
974 969
975static struct xt_counters *alloc_counters(struct xt_table *table) 970static struct xt_counters *alloc_counters(const struct xt_table *table)
976{ 971{
977 unsigned int countersize; 972 unsigned int countersize;
978 struct xt_counters *counters; 973 struct xt_counters *counters;
979 struct xt_table_info *private = table->private; 974 const struct xt_table_info *private = table->private;
980 975
981 /* We need atomic snapshot of counters: rest doesn't change 976 /* We need atomic snapshot of counters: rest doesn't change
982 (other than comefrom, which userspace doesn't care 977 (other than comefrom, which userspace doesn't care
@@ -994,11 +989,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
994 989
995static int 990static int
996copy_entries_to_user(unsigned int total_size, 991copy_entries_to_user(unsigned int total_size,
997 struct xt_table *table, 992 const struct xt_table *table,
998 void __user *userptr) 993 void __user *userptr)
999{ 994{
1000 unsigned int off, num; 995 unsigned int off, num;
1001 struct ip6t_entry *e; 996 const struct ip6t_entry *e;
1002 struct xt_counters *counters; 997 struct xt_counters *counters;
1003 const struct xt_table_info *private = table->private; 998 const struct xt_table_info *private = table->private;
1004 int ret = 0; 999 int ret = 0;
@@ -1050,7 +1045,7 @@ copy_entries_to_user(unsigned int total_size,
1050 } 1045 }
1051 } 1046 }
1052 1047
1053 t = ip6t_get_target(e); 1048 t = ip6t_get_target_c(e);
1054 if (copy_to_user(userptr + off + e->target_offset 1049 if (copy_to_user(userptr + off + e->target_offset
1055 + offsetof(struct ip6t_entry_target, 1050 + offsetof(struct ip6t_entry_target,
1056 u.user.name), 1051 u.user.name),
@@ -1067,7 +1062,7 @@ copy_entries_to_user(unsigned int total_size,
1067} 1062}
1068 1063
1069#ifdef CONFIG_COMPAT 1064#ifdef CONFIG_COMPAT
1070static void compat_standard_from_user(void *dst, void *src) 1065static void compat_standard_from_user(void *dst, const void *src)
1071{ 1066{
1072 int v = *(compat_int_t *)src; 1067 int v = *(compat_int_t *)src;
1073 1068
@@ -1076,7 +1071,7 @@ static void compat_standard_from_user(void *dst, void *src)
1076 memcpy(dst, &v, sizeof(v)); 1071 memcpy(dst, &v, sizeof(v));
1077} 1072}
1078 1073
1079static int compat_standard_to_user(void __user *dst, void *src) 1074static int compat_standard_to_user(void __user *dst, const void *src)
1080{ 1075{
1081 compat_int_t cv = *(int *)src; 1076 compat_int_t cv = *(int *)src;
1082 1077
@@ -1085,25 +1080,20 @@ static int compat_standard_to_user(void __user *dst, void *src)
1085 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; 1080 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1086} 1081}
1087 1082
1088static inline int 1083static int compat_calc_entry(const struct ip6t_entry *e,
1089compat_calc_match(struct ip6t_entry_match *m, int *size)
1090{
1091 *size += xt_compat_match_offset(m->u.kernel.match);
1092 return 0;
1093}
1094
1095static int compat_calc_entry(struct ip6t_entry *e,
1096 const struct xt_table_info *info, 1084 const struct xt_table_info *info,
1097 void *base, struct xt_table_info *newinfo) 1085 const void *base, struct xt_table_info *newinfo)
1098{ 1086{
1099 struct ip6t_entry_target *t; 1087 const struct xt_entry_match *ematch;
1088 const struct ip6t_entry_target *t;
1100 unsigned int entry_offset; 1089 unsigned int entry_offset;
1101 int off, i, ret; 1090 int off, i, ret;
1102 1091
1103 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); 1092 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1104 entry_offset = (void *)e - base; 1093 entry_offset = (void *)e - base;
1105 IP6T_MATCH_ITERATE(e, compat_calc_match, &off); 1094 xt_ematch_foreach(ematch, e)
1106 t = ip6t_get_target(e); 1095 off += xt_compat_match_offset(ematch->u.kernel.match);
1096 t = ip6t_get_target_c(e);
1107 off += xt_compat_target_offset(t->u.kernel.target); 1097 off += xt_compat_target_offset(t->u.kernel.target);
1108 newinfo->size -= off; 1098 newinfo->size -= off;
1109 ret = xt_compat_add_offset(AF_INET6, entry_offset, off); 1099 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
@@ -1124,7 +1114,9 @@ static int compat_calc_entry(struct ip6t_entry *e,
1124static int compat_table_info(const struct xt_table_info *info, 1114static int compat_table_info(const struct xt_table_info *info,
1125 struct xt_table_info *newinfo) 1115 struct xt_table_info *newinfo)
1126{ 1116{
1117 struct ip6t_entry *iter;
1127 void *loc_cpu_entry; 1118 void *loc_cpu_entry;
1119 int ret;
1128 1120
1129 if (!newinfo || !info) 1121 if (!newinfo || !info)
1130 return -EINVAL; 1122 return -EINVAL;
@@ -1133,13 +1125,17 @@ static int compat_table_info(const struct xt_table_info *info,
1133 memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); 1125 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1134 newinfo->initial_entries = 0; 1126 newinfo->initial_entries = 0;
1135 loc_cpu_entry = info->entries[raw_smp_processor_id()]; 1127 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1136 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size, 1128 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1137 compat_calc_entry, info, loc_cpu_entry, 1129 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1138 newinfo); 1130 if (ret != 0)
1131 return ret;
1132 }
1133 return 0;
1139} 1134}
1140#endif 1135#endif
1141 1136
1142static int get_info(struct net *net, void __user *user, int *len, int compat) 1137static int get_info(struct net *net, void __user *user,
1138 const int *len, int compat)
1143{ 1139{
1144 char name[IP6T_TABLE_MAXNAMELEN]; 1140 char name[IP6T_TABLE_MAXNAMELEN];
1145 struct xt_table *t; 1141 struct xt_table *t;
@@ -1199,7 +1195,8 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
1199} 1195}
1200 1196
1201static int 1197static int
1202get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len) 1198get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1199 const int *len)
1203{ 1200{
1204 int ret; 1201 int ret;
1205 struct ip6t_get_entries get; 1202 struct ip6t_get_entries get;
@@ -1247,6 +1244,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1247 struct xt_table_info *oldinfo; 1244 struct xt_table_info *oldinfo;
1248 struct xt_counters *counters; 1245 struct xt_counters *counters;
1249 const void *loc_cpu_old_entry; 1246 const void *loc_cpu_old_entry;
1247 struct ip6t_entry *iter;
1250 1248
1251 ret = 0; 1249 ret = 0;
1252 counters = vmalloc_node(num_counters * sizeof(struct xt_counters), 1250 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
@@ -1290,8 +1288,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1290 1288
1291 /* Decrease module usage counts and free resource */ 1289 /* Decrease module usage counts and free resource */
1292 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; 1290 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1293 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, 1291 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1294 NULL); 1292 cleanup_entry(iter, net);
1293
1295 xt_free_table_info(oldinfo); 1294 xt_free_table_info(oldinfo);
1296 if (copy_to_user(counters_ptr, counters, 1295 if (copy_to_user(counters_ptr, counters,
1297 sizeof(struct xt_counters) * num_counters) != 0) 1296 sizeof(struct xt_counters) * num_counters) != 0)
@@ -1310,12 +1309,13 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1310} 1309}
1311 1310
1312static int 1311static int
1313do_replace(struct net *net, void __user *user, unsigned int len) 1312do_replace(struct net *net, const void __user *user, unsigned int len)
1314{ 1313{
1315 int ret; 1314 int ret;
1316 struct ip6t_replace tmp; 1315 struct ip6t_replace tmp;
1317 struct xt_table_info *newinfo; 1316 struct xt_table_info *newinfo;
1318 void *loc_cpu_entry; 1317 void *loc_cpu_entry;
1318 struct ip6t_entry *iter;
1319 1319
1320 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1320 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1321 return -EFAULT; 1321 return -EFAULT;
@@ -1336,9 +1336,7 @@ do_replace(struct net *net, void __user *user, unsigned int len)
1336 goto free_newinfo; 1336 goto free_newinfo;
1337 } 1337 }
1338 1338
1339 ret = translate_table(tmp.name, tmp.valid_hooks, 1339 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1340 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1341 tmp.hook_entry, tmp.underflow);
1342 if (ret != 0) 1340 if (ret != 0)
1343 goto free_newinfo; 1341 goto free_newinfo;
1344 1342
@@ -1351,27 +1349,15 @@ do_replace(struct net *net, void __user *user, unsigned int len)
1351 return 0; 1349 return 0;
1352 1350
1353 free_newinfo_untrans: 1351 free_newinfo_untrans:
1354 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); 1352 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1353 cleanup_entry(iter, net);
1355 free_newinfo: 1354 free_newinfo:
1356 xt_free_table_info(newinfo); 1355 xt_free_table_info(newinfo);
1357 return ret; 1356 return ret;
1358} 1357}
1359 1358
1360/* We're lazy, and add to the first CPU; overflow works its fey magic
1361 * and everything is OK. */
1362static int 1359static int
1363add_counter_to_entry(struct ip6t_entry *e, 1360do_add_counters(struct net *net, const void __user *user, unsigned int len,
1364 const struct xt_counters addme[],
1365 unsigned int *i)
1366{
1367 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1368
1369 (*i)++;
1370 return 0;
1371}
1372
1373static int
1374do_add_counters(struct net *net, void __user *user, unsigned int len,
1375 int compat) 1361 int compat)
1376{ 1362{
1377 unsigned int i, curcpu; 1363 unsigned int i, curcpu;
@@ -1385,6 +1371,7 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,
1385 const struct xt_table_info *private; 1371 const struct xt_table_info *private;
1386 int ret = 0; 1372 int ret = 0;
1387 const void *loc_cpu_entry; 1373 const void *loc_cpu_entry;
1374 struct ip6t_entry *iter;
1388#ifdef CONFIG_COMPAT 1375#ifdef CONFIG_COMPAT
1389 struct compat_xt_counters_info compat_tmp; 1376 struct compat_xt_counters_info compat_tmp;
1390 1377
@@ -1443,11 +1430,10 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,
1443 curcpu = smp_processor_id(); 1430 curcpu = smp_processor_id();
1444 xt_info_wrlock(curcpu); 1431 xt_info_wrlock(curcpu);
1445 loc_cpu_entry = private->entries[curcpu]; 1432 loc_cpu_entry = private->entries[curcpu];
1446 IP6T_ENTRY_ITERATE(loc_cpu_entry, 1433 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1447 private->size, 1434 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1448 add_counter_to_entry, 1435 ++i;
1449 paddc, 1436 }
1450 &i);
1451 xt_info_wrunlock(curcpu); 1437 xt_info_wrunlock(curcpu);
1452 1438
1453 unlock_up_free: 1439 unlock_up_free:
@@ -1476,45 +1462,40 @@ struct compat_ip6t_replace {
1476static int 1462static int
1477compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr, 1463compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1478 unsigned int *size, struct xt_counters *counters, 1464 unsigned int *size, struct xt_counters *counters,
1479 unsigned int *i) 1465 unsigned int i)
1480{ 1466{
1481 struct ip6t_entry_target *t; 1467 struct ip6t_entry_target *t;
1482 struct compat_ip6t_entry __user *ce; 1468 struct compat_ip6t_entry __user *ce;
1483 u_int16_t target_offset, next_offset; 1469 u_int16_t target_offset, next_offset;
1484 compat_uint_t origsize; 1470 compat_uint_t origsize;
1485 int ret; 1471 const struct xt_entry_match *ematch;
1472 int ret = 0;
1486 1473
1487 ret = -EFAULT;
1488 origsize = *size; 1474 origsize = *size;
1489 ce = (struct compat_ip6t_entry __user *)*dstptr; 1475 ce = (struct compat_ip6t_entry __user *)*dstptr;
1490 if (copy_to_user(ce, e, sizeof(struct ip6t_entry))) 1476 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1491 goto out; 1477 copy_to_user(&ce->counters, &counters[i],
1492 1478 sizeof(counters[i])) != 0)
1493 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i]))) 1479 return -EFAULT;
1494 goto out;
1495 1480
1496 *dstptr += sizeof(struct compat_ip6t_entry); 1481 *dstptr += sizeof(struct compat_ip6t_entry);
1497 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); 1482 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1498 1483
1499 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size); 1484 xt_ematch_foreach(ematch, e) {
1485 ret = xt_compat_match_to_user(ematch, dstptr, size);
1486 if (ret != 0)
1487 return ret;
1488 }
1500 target_offset = e->target_offset - (origsize - *size); 1489 target_offset = e->target_offset - (origsize - *size);
1501 if (ret)
1502 goto out;
1503 t = ip6t_get_target(e); 1490 t = ip6t_get_target(e);
1504 ret = xt_compat_target_to_user(t, dstptr, size); 1491 ret = xt_compat_target_to_user(t, dstptr, size);
1505 if (ret) 1492 if (ret)
1506 goto out; 1493 return ret;
1507 ret = -EFAULT;
1508 next_offset = e->next_offset - (origsize - *size); 1494 next_offset = e->next_offset - (origsize - *size);
1509 if (put_user(target_offset, &ce->target_offset)) 1495 if (put_user(target_offset, &ce->target_offset) != 0 ||
1510 goto out; 1496 put_user(next_offset, &ce->next_offset) != 0)
1511 if (put_user(next_offset, &ce->next_offset)) 1497 return -EFAULT;
1512 goto out;
1513
1514 (*i)++;
1515 return 0; 1498 return 0;
1516out:
1517 return ret;
1518} 1499}
1519 1500
1520static int 1501static int
@@ -1522,7 +1503,7 @@ compat_find_calc_match(struct ip6t_entry_match *m,
1522 const char *name, 1503 const char *name,
1523 const struct ip6t_ip6 *ipv6, 1504 const struct ip6t_ip6 *ipv6,
1524 unsigned int hookmask, 1505 unsigned int hookmask,
1525 int *size, unsigned int *i) 1506 int *size)
1526{ 1507{
1527 struct xt_match *match; 1508 struct xt_match *match;
1528 1509
@@ -1536,47 +1517,32 @@ compat_find_calc_match(struct ip6t_entry_match *m,
1536 } 1517 }
1537 m->u.kernel.match = match; 1518 m->u.kernel.match = match;
1538 *size += xt_compat_match_offset(match); 1519 *size += xt_compat_match_offset(match);
1539
1540 (*i)++;
1541 return 0;
1542}
1543
1544static int
1545compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1546{
1547 if (i && (*i)-- == 0)
1548 return 1;
1549
1550 module_put(m->u.kernel.match->me);
1551 return 0; 1520 return 0;
1552} 1521}
1553 1522
1554static int 1523static void compat_release_entry(struct compat_ip6t_entry *e)
1555compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1556{ 1524{
1557 struct ip6t_entry_target *t; 1525 struct ip6t_entry_target *t;
1558 1526 struct xt_entry_match *ematch;
1559 if (i && (*i)-- == 0)
1560 return 1;
1561 1527
1562 /* Cleanup all matches */ 1528 /* Cleanup all matches */
1563 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL); 1529 xt_ematch_foreach(ematch, e)
1530 module_put(ematch->u.kernel.match->me);
1564 t = compat_ip6t_get_target(e); 1531 t = compat_ip6t_get_target(e);
1565 module_put(t->u.kernel.target->me); 1532 module_put(t->u.kernel.target->me);
1566 return 0;
1567} 1533}
1568 1534
1569static int 1535static int
1570check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, 1536check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1571 struct xt_table_info *newinfo, 1537 struct xt_table_info *newinfo,
1572 unsigned int *size, 1538 unsigned int *size,
1573 unsigned char *base, 1539 const unsigned char *base,
1574 unsigned char *limit, 1540 const unsigned char *limit,
1575 unsigned int *hook_entries, 1541 const unsigned int *hook_entries,
1576 unsigned int *underflows, 1542 const unsigned int *underflows,
1577 unsigned int *i,
1578 const char *name) 1543 const char *name)
1579{ 1544{
1545 struct xt_entry_match *ematch;
1580 struct ip6t_entry_target *t; 1546 struct ip6t_entry_target *t;
1581 struct xt_target *target; 1547 struct xt_target *target;
1582 unsigned int entry_offset; 1548 unsigned int entry_offset;
@@ -1605,10 +1571,13 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1605 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); 1571 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1606 entry_offset = (void *)e - (void *)base; 1572 entry_offset = (void *)e - (void *)base;
1607 j = 0; 1573 j = 0;
1608 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name, 1574 xt_ematch_foreach(ematch, e) {
1609 &e->ipv6, e->comefrom, &off, &j); 1575 ret = compat_find_calc_match(ematch, name,
1610 if (ret != 0) 1576 &e->ipv6, e->comefrom, &off);
1611 goto release_matches; 1577 if (ret != 0)
1578 goto release_matches;
1579 ++j;
1580 }
1612 1581
1613 t = compat_ip6t_get_target(e); 1582 t = compat_ip6t_get_target(e);
1614 target = try_then_request_module(xt_find_target(AF_INET6, 1583 target = try_then_request_module(xt_find_target(AF_INET6,
@@ -1640,14 +1609,16 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1640 /* Clear counters and comefrom */ 1609 /* Clear counters and comefrom */
1641 memset(&e->counters, 0, sizeof(e->counters)); 1610 memset(&e->counters, 0, sizeof(e->counters));
1642 e->comefrom = 0; 1611 e->comefrom = 0;
1643
1644 (*i)++;
1645 return 0; 1612 return 0;
1646 1613
1647out: 1614out:
1648 module_put(t->u.kernel.target->me); 1615 module_put(t->u.kernel.target->me);
1649release_matches: 1616release_matches:
1650 IP6T_MATCH_ITERATE(e, compat_release_match, &j); 1617 xt_ematch_foreach(ematch, e) {
1618 if (j-- == 0)
1619 break;
1620 module_put(ematch->u.kernel.match->me);
1621 }
1651 return ret; 1622 return ret;
1652} 1623}
1653 1624
@@ -1661,6 +1632,7 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1661 struct ip6t_entry *de; 1632 struct ip6t_entry *de;
1662 unsigned int origsize; 1633 unsigned int origsize;
1663 int ret, h; 1634 int ret, h;
1635 struct xt_entry_match *ematch;
1664 1636
1665 ret = 0; 1637 ret = 0;
1666 origsize = *size; 1638 origsize = *size;
@@ -1671,10 +1643,11 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1671 *dstptr += sizeof(struct ip6t_entry); 1643 *dstptr += sizeof(struct ip6t_entry);
1672 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); 1644 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1673 1645
1674 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user, 1646 xt_ematch_foreach(ematch, e) {
1675 dstptr, size); 1647 ret = xt_compat_match_from_user(ematch, dstptr, size);
1676 if (ret) 1648 if (ret != 0)
1677 return ret; 1649 return ret;
1650 }
1678 de->target_offset = e->target_offset - (origsize - *size); 1651 de->target_offset = e->target_offset - (origsize - *size);
1679 t = compat_ip6t_get_target(e); 1652 t = compat_ip6t_get_target(e);
1680 target = t->u.kernel.target; 1653 target = t->u.kernel.target;
@@ -1690,36 +1663,44 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1690 return ret; 1663 return ret;
1691} 1664}
1692 1665
1693static int compat_check_entry(struct ip6t_entry *e, const char *name, 1666static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1694 unsigned int *i) 1667 const char *name)
1695{ 1668{
1696 unsigned int j; 1669 unsigned int j;
1697 int ret; 1670 int ret = 0;
1698 struct xt_mtchk_param mtpar; 1671 struct xt_mtchk_param mtpar;
1672 struct xt_entry_match *ematch;
1699 1673
1700 j = 0; 1674 j = 0;
1675 mtpar.net = net;
1701 mtpar.table = name; 1676 mtpar.table = name;
1702 mtpar.entryinfo = &e->ipv6; 1677 mtpar.entryinfo = &e->ipv6;
1703 mtpar.hook_mask = e->comefrom; 1678 mtpar.hook_mask = e->comefrom;
1704 mtpar.family = NFPROTO_IPV6; 1679 mtpar.family = NFPROTO_IPV6;
1705 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j); 1680 xt_ematch_foreach(ematch, e) {
1706 if (ret) 1681 ret = check_match(ematch, &mtpar);
1707 goto cleanup_matches; 1682 if (ret != 0)
1683 goto cleanup_matches;
1684 ++j;
1685 }
1708 1686
1709 ret = check_target(e, name); 1687 ret = check_target(e, net, name);
1710 if (ret) 1688 if (ret)
1711 goto cleanup_matches; 1689 goto cleanup_matches;
1712
1713 (*i)++;
1714 return 0; 1690 return 0;
1715 1691
1716 cleanup_matches: 1692 cleanup_matches:
1717 IP6T_MATCH_ITERATE(e, cleanup_match, &j); 1693 xt_ematch_foreach(ematch, e) {
1694 if (j-- == 0)
1695 break;
1696 cleanup_match(ematch, net);
1697 }
1718 return ret; 1698 return ret;
1719} 1699}
1720 1700
1721static int 1701static int
1722translate_compat_table(const char *name, 1702translate_compat_table(struct net *net,
1703 const char *name,
1723 unsigned int valid_hooks, 1704 unsigned int valid_hooks,
1724 struct xt_table_info **pinfo, 1705 struct xt_table_info **pinfo,
1725 void **pentry0, 1706 void **pentry0,
@@ -1731,8 +1712,10 @@ translate_compat_table(const char *name,
1731 unsigned int i, j; 1712 unsigned int i, j;
1732 struct xt_table_info *newinfo, *info; 1713 struct xt_table_info *newinfo, *info;
1733 void *pos, *entry0, *entry1; 1714 void *pos, *entry0, *entry1;
1715 struct compat_ip6t_entry *iter0;
1716 struct ip6t_entry *iter1;
1734 unsigned int size; 1717 unsigned int size;
1735 int ret; 1718 int ret = 0;
1736 1719
1737 info = *pinfo; 1720 info = *pinfo;
1738 entry0 = *pentry0; 1721 entry0 = *pentry0;
@@ -1749,13 +1732,14 @@ translate_compat_table(const char *name,
1749 j = 0; 1732 j = 0;
1750 xt_compat_lock(AF_INET6); 1733 xt_compat_lock(AF_INET6);
1751 /* Walk through entries, checking offsets. */ 1734 /* Walk through entries, checking offsets. */
1752 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, 1735 xt_entry_foreach(iter0, entry0, total_size) {
1753 check_compat_entry_size_and_hooks, 1736 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1754 info, &size, entry0, 1737 entry0, entry0 + total_size, hook_entries, underflows,
1755 entry0 + total_size, 1738 name);
1756 hook_entries, underflows, &j, name); 1739 if (ret != 0)
1757 if (ret != 0) 1740 goto out_unlock;
1758 goto out_unlock; 1741 ++j;
1742 }
1759 1743
1760 ret = -EINVAL; 1744 ret = -EINVAL;
1761 if (j != number) { 1745 if (j != number) {
@@ -1794,9 +1778,12 @@ translate_compat_table(const char *name,
1794 entry1 = newinfo->entries[raw_smp_processor_id()]; 1778 entry1 = newinfo->entries[raw_smp_processor_id()];
1795 pos = entry1; 1779 pos = entry1;
1796 size = total_size; 1780 size = total_size;
1797 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, 1781 xt_entry_foreach(iter0, entry0, total_size) {
1798 compat_copy_entry_from_user, 1782 ret = compat_copy_entry_from_user(iter0, &pos,
1799 &pos, &size, name, newinfo, entry1); 1783 &size, name, newinfo, entry1);
1784 if (ret != 0)
1785 break;
1786 }
1800 xt_compat_flush_offsets(AF_INET6); 1787 xt_compat_flush_offsets(AF_INET6);
1801 xt_compat_unlock(AF_INET6); 1788 xt_compat_unlock(AF_INET6);
1802 if (ret) 1789 if (ret)
@@ -1807,13 +1794,32 @@ translate_compat_table(const char *name,
1807 goto free_newinfo; 1794 goto free_newinfo;
1808 1795
1809 i = 0; 1796 i = 0;
1810 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry, 1797 xt_entry_foreach(iter1, entry1, newinfo->size) {
1811 name, &i); 1798 ret = compat_check_entry(iter1, net, name);
1799 if (ret != 0)
1800 break;
1801 ++i;
1802 }
1812 if (ret) { 1803 if (ret) {
1804 /*
1805 * The first i matches need cleanup_entry (calls ->destroy)
1806 * because they had called ->check already. The other j-i
1807 * entries need only release.
1808 */
1809 int skip = i;
1813 j -= i; 1810 j -= i;
1814 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i, 1811 xt_entry_foreach(iter0, entry0, newinfo->size) {
1815 compat_release_entry, &j); 1812 if (skip-- > 0)
1816 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i); 1813 continue;
1814 if (j-- == 0)
1815 break;
1816 compat_release_entry(iter0);
1817 }
1818 xt_entry_foreach(iter1, entry1, newinfo->size) {
1819 if (i-- == 0)
1820 break;
1821 cleanup_entry(iter1, net);
1822 }
1817 xt_free_table_info(newinfo); 1823 xt_free_table_info(newinfo);
1818 return ret; 1824 return ret;
1819 } 1825 }
@@ -1831,7 +1837,11 @@ translate_compat_table(const char *name,
1831free_newinfo: 1837free_newinfo:
1832 xt_free_table_info(newinfo); 1838 xt_free_table_info(newinfo);
1833out: 1839out:
1834 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j); 1840 xt_entry_foreach(iter0, entry0, total_size) {
1841 if (j-- == 0)
1842 break;
1843 compat_release_entry(iter0);
1844 }
1835 return ret; 1845 return ret;
1836out_unlock: 1846out_unlock:
1837 xt_compat_flush_offsets(AF_INET6); 1847 xt_compat_flush_offsets(AF_INET6);
@@ -1846,6 +1856,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
1846 struct compat_ip6t_replace tmp; 1856 struct compat_ip6t_replace tmp;
1847 struct xt_table_info *newinfo; 1857 struct xt_table_info *newinfo;
1848 void *loc_cpu_entry; 1858 void *loc_cpu_entry;
1859 struct ip6t_entry *iter;
1849 1860
1850 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1861 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1851 return -EFAULT; 1862 return -EFAULT;
@@ -1868,7 +1879,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
1868 goto free_newinfo; 1879 goto free_newinfo;
1869 } 1880 }
1870 1881
1871 ret = translate_compat_table(tmp.name, tmp.valid_hooks, 1882 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1872 &newinfo, &loc_cpu_entry, tmp.size, 1883 &newinfo, &loc_cpu_entry, tmp.size,
1873 tmp.num_entries, tmp.hook_entry, 1884 tmp.num_entries, tmp.hook_entry,
1874 tmp.underflow); 1885 tmp.underflow);
@@ -1884,7 +1895,8 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
1884 return 0; 1895 return 0;
1885 1896
1886 free_newinfo_untrans: 1897 free_newinfo_untrans:
1887 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); 1898 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1899 cleanup_entry(iter, net);
1888 free_newinfo: 1900 free_newinfo:
1889 xt_free_table_info(newinfo); 1901 xt_free_table_info(newinfo);
1890 return ret; 1902 return ret;
@@ -1933,6 +1945,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1933 int ret = 0; 1945 int ret = 0;
1934 const void *loc_cpu_entry; 1946 const void *loc_cpu_entry;
1935 unsigned int i = 0; 1947 unsigned int i = 0;
1948 struct ip6t_entry *iter;
1936 1949
1937 counters = alloc_counters(table); 1950 counters = alloc_counters(table);
1938 if (IS_ERR(counters)) 1951 if (IS_ERR(counters))
@@ -1945,9 +1958,12 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1945 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 1958 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1946 pos = userptr; 1959 pos = userptr;
1947 size = total_size; 1960 size = total_size;
1948 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size, 1961 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1949 compat_copy_entry_to_user, 1962 ret = compat_copy_entry_to_user(iter, &pos,
1950 &pos, &size, counters, &i); 1963 &size, counters, i++);
1964 if (ret != 0)
1965 break;
1966 }
1951 1967
1952 vfree(counters); 1968 vfree(counters);
1953 return ret; 1969 return ret;
@@ -2121,11 +2137,7 @@ struct xt_table *ip6t_register_table(struct net *net,
2121 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; 2137 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2122 memcpy(loc_cpu_entry, repl->entries, repl->size); 2138 memcpy(loc_cpu_entry, repl->entries, repl->size);
2123 2139
2124 ret = translate_table(table->name, table->valid_hooks, 2140 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2125 newinfo, loc_cpu_entry, repl->size,
2126 repl->num_entries,
2127 repl->hook_entry,
2128 repl->underflow);
2129 if (ret != 0) 2141 if (ret != 0)
2130 goto out_free; 2142 goto out_free;
2131 2143
@@ -2142,17 +2154,19 @@ out:
2142 return ERR_PTR(ret); 2154 return ERR_PTR(ret);
2143} 2155}
2144 2156
2145void ip6t_unregister_table(struct xt_table *table) 2157void ip6t_unregister_table(struct net *net, struct xt_table *table)
2146{ 2158{
2147 struct xt_table_info *private; 2159 struct xt_table_info *private;
2148 void *loc_cpu_entry; 2160 void *loc_cpu_entry;
2149 struct module *table_owner = table->me; 2161 struct module *table_owner = table->me;
2162 struct ip6t_entry *iter;
2150 2163
2151 private = xt_unregister_table(table); 2164 private = xt_unregister_table(table);
2152 2165
2153 /* Decrease module usage counts and free resources */ 2166 /* Decrease module usage counts and free resources */
2154 loc_cpu_entry = private->entries[raw_smp_processor_id()]; 2167 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2155 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL); 2168 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2169 cleanup_entry(iter, net);
2156 if (private->number > private->initial_entries) 2170 if (private->number > private->initial_entries)
2157 module_put(table_owner); 2171 module_put(table_owner);
2158 xt_free_table_info(private); 2172 xt_free_table_info(private);
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 8311ca31816..dd8afbaf00a 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -169,7 +169,7 @@ send_unreach(struct net *net, struct sk_buff *skb_in, unsigned char code,
169 if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL) 169 if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
170 skb_in->dev = net->loopback_dev; 170 skb_in->dev = net->loopback_dev;
171 171
172 icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0, NULL); 172 icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
173} 173}
174 174
175static unsigned int 175static unsigned int
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index ad378efd0eb..36b72cafc22 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -21,99 +21,26 @@ MODULE_DESCRIPTION("ip6tables filter table");
21 (1 << NF_INET_FORWARD) | \ 21 (1 << NF_INET_FORWARD) | \
22 (1 << NF_INET_LOCAL_OUT)) 22 (1 << NF_INET_LOCAL_OUT))
23 23
24static struct
25{
26 struct ip6t_replace repl;
27 struct ip6t_standard entries[3];
28 struct ip6t_error term;
29} initial_table __net_initdata = {
30 .repl = {
31 .name = "filter",
32 .valid_hooks = FILTER_VALID_HOOKS,
33 .num_entries = 4,
34 .size = sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error),
35 .hook_entry = {
36 [NF_INET_LOCAL_IN] = 0,
37 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
38 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2
39 },
40 .underflow = {
41 [NF_INET_LOCAL_IN] = 0,
42 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
43 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2
44 },
45 },
46 .entries = {
47 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
48 IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
49 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
50 },
51 .term = IP6T_ERROR_INIT, /* ERROR */
52};
53
54static const struct xt_table packet_filter = { 24static const struct xt_table packet_filter = {
55 .name = "filter", 25 .name = "filter",
56 .valid_hooks = FILTER_VALID_HOOKS, 26 .valid_hooks = FILTER_VALID_HOOKS,
57 .me = THIS_MODULE, 27 .me = THIS_MODULE,
58 .af = NFPROTO_IPV6, 28 .af = NFPROTO_IPV6,
29 .priority = NF_IP6_PRI_FILTER,
59}; 30};
60 31
61/* The work comes in here from netfilter.c. */ 32/* The work comes in here from netfilter.c. */
62static unsigned int 33static unsigned int
63ip6t_in_hook(unsigned int hook, 34ip6table_filter_hook(unsigned int hook, struct sk_buff *skb,
64 struct sk_buff *skb, 35 const struct net_device *in, const struct net_device *out,
65 const struct net_device *in, 36 int (*okfn)(struct sk_buff *))
66 const struct net_device *out,
67 int (*okfn)(struct sk_buff *))
68{
69 return ip6t_do_table(skb, hook, in, out,
70 dev_net(in)->ipv6.ip6table_filter);
71}
72
73static unsigned int
74ip6t_local_out_hook(unsigned int hook,
75 struct sk_buff *skb,
76 const struct net_device *in,
77 const struct net_device *out,
78 int (*okfn)(struct sk_buff *))
79{ 37{
80#if 0 38 const struct net *net = dev_net((in != NULL) ? in : out);
81 /* root is playing with raw sockets. */
82 if (skb->len < sizeof(struct iphdr) ||
83 ip_hdrlen(skb) < sizeof(struct iphdr)) {
84 if (net_ratelimit())
85 printk("ip6t_hook: happy cracking.\n");
86 return NF_ACCEPT;
87 }
88#endif
89 39
90 return ip6t_do_table(skb, hook, in, out, 40 return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_filter);
91 dev_net(out)->ipv6.ip6table_filter);
92} 41}
93 42
94static struct nf_hook_ops ip6t_ops[] __read_mostly = { 43static struct nf_hook_ops *filter_ops __read_mostly;
95 {
96 .hook = ip6t_in_hook,
97 .owner = THIS_MODULE,
98 .pf = NFPROTO_IPV6,
99 .hooknum = NF_INET_LOCAL_IN,
100 .priority = NF_IP6_PRI_FILTER,
101 },
102 {
103 .hook = ip6t_in_hook,
104 .owner = THIS_MODULE,
105 .pf = NFPROTO_IPV6,
106 .hooknum = NF_INET_FORWARD,
107 .priority = NF_IP6_PRI_FILTER,
108 },
109 {
110 .hook = ip6t_local_out_hook,
111 .owner = THIS_MODULE,
112 .pf = NFPROTO_IPV6,
113 .hooknum = NF_INET_LOCAL_OUT,
114 .priority = NF_IP6_PRI_FILTER,
115 },
116};
117 44
118/* Default to forward because I got too much mail already. */ 45/* Default to forward because I got too much mail already. */
119static int forward = NF_ACCEPT; 46static int forward = NF_ACCEPT;
@@ -121,9 +48,18 @@ module_param(forward, bool, 0000);
121 48
122static int __net_init ip6table_filter_net_init(struct net *net) 49static int __net_init ip6table_filter_net_init(struct net *net)
123{ 50{
124 /* Register table */ 51 struct ip6t_replace *repl;
52
53 repl = ip6t_alloc_initial_table(&packet_filter);
54 if (repl == NULL)
55 return -ENOMEM;
56 /* Entry 1 is the FORWARD hook */
57 ((struct ip6t_standard *)repl->entries)[1].target.verdict =
58 -forward - 1;
59
125 net->ipv6.ip6table_filter = 60 net->ipv6.ip6table_filter =
126 ip6t_register_table(net, &packet_filter, &initial_table.repl); 61 ip6t_register_table(net, &packet_filter, repl);
62 kfree(repl);
127 if (IS_ERR(net->ipv6.ip6table_filter)) 63 if (IS_ERR(net->ipv6.ip6table_filter))
128 return PTR_ERR(net->ipv6.ip6table_filter); 64 return PTR_ERR(net->ipv6.ip6table_filter);
129 return 0; 65 return 0;
@@ -131,7 +67,7 @@ static int __net_init ip6table_filter_net_init(struct net *net)
131 67
132static void __net_exit ip6table_filter_net_exit(struct net *net) 68static void __net_exit ip6table_filter_net_exit(struct net *net)
133{ 69{
134 ip6t_unregister_table(net->ipv6.ip6table_filter); 70 ip6t_unregister_table(net, net->ipv6.ip6table_filter);
135} 71}
136 72
137static struct pernet_operations ip6table_filter_net_ops = { 73static struct pernet_operations ip6table_filter_net_ops = {
@@ -148,17 +84,16 @@ static int __init ip6table_filter_init(void)
148 return -EINVAL; 84 return -EINVAL;
149 } 85 }
150 86
151 /* Entry 1 is the FORWARD hook */
152 initial_table.entries[1].target.verdict = -forward - 1;
153
154 ret = register_pernet_subsys(&ip6table_filter_net_ops); 87 ret = register_pernet_subsys(&ip6table_filter_net_ops);
155 if (ret < 0) 88 if (ret < 0)
156 return ret; 89 return ret;
157 90
158 /* Register hooks */ 91 /* Register hooks */
159 ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 92 filter_ops = xt_hook_link(&packet_filter, ip6table_filter_hook);
160 if (ret < 0) 93 if (IS_ERR(filter_ops)) {
94 ret = PTR_ERR(filter_ops);
161 goto cleanup_table; 95 goto cleanup_table;
96 }
162 97
163 return ret; 98 return ret;
164 99
@@ -169,7 +104,7 @@ static int __init ip6table_filter_init(void)
169 104
170static void __exit ip6table_filter_fini(void) 105static void __exit ip6table_filter_fini(void)
171{ 106{
172 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 107 xt_hook_unlink(&packet_filter, filter_ops);
173 unregister_pernet_subsys(&ip6table_filter_net_ops); 108 unregister_pernet_subsys(&ip6table_filter_net_ops);
174} 109}
175 110
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index a929c19d30e..7844e557c0e 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -21,80 +21,17 @@ MODULE_DESCRIPTION("ip6tables mangle table");
21 (1 << NF_INET_LOCAL_OUT) | \ 21 (1 << NF_INET_LOCAL_OUT) | \
22 (1 << NF_INET_POST_ROUTING)) 22 (1 << NF_INET_POST_ROUTING))
23 23
24static const struct
25{
26 struct ip6t_replace repl;
27 struct ip6t_standard entries[5];
28 struct ip6t_error term;
29} initial_table __net_initdata = {
30 .repl = {
31 .name = "mangle",
32 .valid_hooks = MANGLE_VALID_HOOKS,
33 .num_entries = 6,
34 .size = sizeof(struct ip6t_standard) * 5 + sizeof(struct ip6t_error),
35 .hook_entry = {
36 [NF_INET_PRE_ROUTING] = 0,
37 [NF_INET_LOCAL_IN] = sizeof(struct ip6t_standard),
38 [NF_INET_FORWARD] = sizeof(struct ip6t_standard) * 2,
39 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3,
40 [NF_INET_POST_ROUTING] = sizeof(struct ip6t_standard) * 4,
41 },
42 .underflow = {
43 [NF_INET_PRE_ROUTING] = 0,
44 [NF_INET_LOCAL_IN] = sizeof(struct ip6t_standard),
45 [NF_INET_FORWARD] = sizeof(struct ip6t_standard) * 2,
46 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3,
47 [NF_INET_POST_ROUTING] = sizeof(struct ip6t_standard) * 4,
48 },
49 },
50 .entries = {
51 IP6T_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
52 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
53 IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
54 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
55 IP6T_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
56 },
57 .term = IP6T_ERROR_INIT, /* ERROR */
58};
59
60static const struct xt_table packet_mangler = { 24static const struct xt_table packet_mangler = {
61 .name = "mangle", 25 .name = "mangle",
62 .valid_hooks = MANGLE_VALID_HOOKS, 26 .valid_hooks = MANGLE_VALID_HOOKS,
63 .me = THIS_MODULE, 27 .me = THIS_MODULE,
64 .af = NFPROTO_IPV6, 28 .af = NFPROTO_IPV6,
29 .priority = NF_IP6_PRI_MANGLE,
65}; 30};
66 31
67/* The work comes in here from netfilter.c. */
68static unsigned int
69ip6t_in_hook(unsigned int hook,
70 struct sk_buff *skb,
71 const struct net_device *in,
72 const struct net_device *out,
73 int (*okfn)(struct sk_buff *))
74{
75 return ip6t_do_table(skb, hook, in, out,
76 dev_net(in)->ipv6.ip6table_mangle);
77}
78
79static unsigned int
80ip6t_post_routing_hook(unsigned int hook,
81 struct sk_buff *skb,
82 const struct net_device *in,
83 const struct net_device *out,
84 int (*okfn)(struct sk_buff *))
85{
86 return ip6t_do_table(skb, hook, in, out,
87 dev_net(out)->ipv6.ip6table_mangle);
88}
89
90static unsigned int 32static unsigned int
91ip6t_local_out_hook(unsigned int hook, 33ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
92 struct sk_buff *skb,
93 const struct net_device *in,
94 const struct net_device *out,
95 int (*okfn)(struct sk_buff *))
96{ 34{
97
98 unsigned int ret; 35 unsigned int ret;
99 struct in6_addr saddr, daddr; 36 struct in6_addr saddr, daddr;
100 u_int8_t hop_limit; 37 u_int8_t hop_limit;
@@ -119,7 +56,7 @@ ip6t_local_out_hook(unsigned int hook,
119 /* flowlabel and prio (includes version, which shouldn't change either */ 56 /* flowlabel and prio (includes version, which shouldn't change either */
120 flowlabel = *((u_int32_t *)ipv6_hdr(skb)); 57 flowlabel = *((u_int32_t *)ipv6_hdr(skb));
121 58
122 ret = ip6t_do_table(skb, hook, in, out, 59 ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
123 dev_net(out)->ipv6.ip6table_mangle); 60 dev_net(out)->ipv6.ip6table_mangle);
124 61
125 if (ret != NF_DROP && ret != NF_STOLEN && 62 if (ret != NF_DROP && ret != NF_STOLEN &&
@@ -132,49 +69,33 @@ ip6t_local_out_hook(unsigned int hook,
132 return ret; 69 return ret;
133} 70}
134 71
135static struct nf_hook_ops ip6t_ops[] __read_mostly = { 72/* The work comes in here from netfilter.c. */
136 { 73static unsigned int
137 .hook = ip6t_in_hook, 74ip6table_mangle_hook(unsigned int hook, struct sk_buff *skb,
138 .owner = THIS_MODULE, 75 const struct net_device *in, const struct net_device *out,
139 .pf = NFPROTO_IPV6, 76 int (*okfn)(struct sk_buff *))
140 .hooknum = NF_INET_PRE_ROUTING, 77{
141 .priority = NF_IP6_PRI_MANGLE, 78 if (hook == NF_INET_LOCAL_OUT)
142 }, 79 return ip6t_mangle_out(skb, out);
143 { 80 if (hook == NF_INET_POST_ROUTING)
144 .hook = ip6t_in_hook, 81 return ip6t_do_table(skb, hook, in, out,
145 .owner = THIS_MODULE, 82 dev_net(out)->ipv6.ip6table_mangle);
146 .pf = NFPROTO_IPV6, 83 /* INPUT/FORWARD */
147 .hooknum = NF_INET_LOCAL_IN, 84 return ip6t_do_table(skb, hook, in, out,
148 .priority = NF_IP6_PRI_MANGLE, 85 dev_net(in)->ipv6.ip6table_mangle);
149 }, 86}
150 {
151 .hook = ip6t_in_hook,
152 .owner = THIS_MODULE,
153 .pf = NFPROTO_IPV6,
154 .hooknum = NF_INET_FORWARD,
155 .priority = NF_IP6_PRI_MANGLE,
156 },
157 {
158 .hook = ip6t_local_out_hook,
159 .owner = THIS_MODULE,
160 .pf = NFPROTO_IPV6,
161 .hooknum = NF_INET_LOCAL_OUT,
162 .priority = NF_IP6_PRI_MANGLE,
163 },
164 {
165 .hook = ip6t_post_routing_hook,
166 .owner = THIS_MODULE,
167 .pf = NFPROTO_IPV6,
168 .hooknum = NF_INET_POST_ROUTING,
169 .priority = NF_IP6_PRI_MANGLE,
170 },
171};
172 87
88static struct nf_hook_ops *mangle_ops __read_mostly;
173static int __net_init ip6table_mangle_net_init(struct net *net) 89static int __net_init ip6table_mangle_net_init(struct net *net)
174{ 90{
175 /* Register table */ 91 struct ip6t_replace *repl;
92
93 repl = ip6t_alloc_initial_table(&packet_mangler);
94 if (repl == NULL)
95 return -ENOMEM;
176 net->ipv6.ip6table_mangle = 96 net->ipv6.ip6table_mangle =
177 ip6t_register_table(net, &packet_mangler, &initial_table.repl); 97 ip6t_register_table(net, &packet_mangler, repl);
98 kfree(repl);
178 if (IS_ERR(net->ipv6.ip6table_mangle)) 99 if (IS_ERR(net->ipv6.ip6table_mangle))
179 return PTR_ERR(net->ipv6.ip6table_mangle); 100 return PTR_ERR(net->ipv6.ip6table_mangle);
180 return 0; 101 return 0;
@@ -182,7 +103,7 @@ static int __net_init ip6table_mangle_net_init(struct net *net)
182 103
183static void __net_exit ip6table_mangle_net_exit(struct net *net) 104static void __net_exit ip6table_mangle_net_exit(struct net *net)
184{ 105{
185 ip6t_unregister_table(net->ipv6.ip6table_mangle); 106 ip6t_unregister_table(net, net->ipv6.ip6table_mangle);
186} 107}
187 108
188static struct pernet_operations ip6table_mangle_net_ops = { 109static struct pernet_operations ip6table_mangle_net_ops = {
@@ -199,9 +120,11 @@ static int __init ip6table_mangle_init(void)
199 return ret; 120 return ret;
200 121
201 /* Register hooks */ 122 /* Register hooks */
202 ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 123 mangle_ops = xt_hook_link(&packet_mangler, ip6table_mangle_hook);
203 if (ret < 0) 124 if (IS_ERR(mangle_ops)) {
125 ret = PTR_ERR(mangle_ops);
204 goto cleanup_table; 126 goto cleanup_table;
127 }
205 128
206 return ret; 129 return ret;
207 130
@@ -212,7 +135,7 @@ static int __init ip6table_mangle_init(void)
212 135
213static void __exit ip6table_mangle_fini(void) 136static void __exit ip6table_mangle_fini(void)
214{ 137{
215 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 138 xt_hook_unlink(&packet_mangler, mangle_ops);
216 unregister_pernet_subsys(&ip6table_mangle_net_ops); 139 unregister_pernet_subsys(&ip6table_mangle_net_ops);
217} 140}
218 141
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index ed1a1180f3b..aef31a29de9 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -8,85 +8,37 @@
8 8
9#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT)) 9#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT))
10 10
11static const struct
12{
13 struct ip6t_replace repl;
14 struct ip6t_standard entries[2];
15 struct ip6t_error term;
16} initial_table __net_initdata = {
17 .repl = {
18 .name = "raw",
19 .valid_hooks = RAW_VALID_HOOKS,
20 .num_entries = 3,
21 .size = sizeof(struct ip6t_standard) * 2 + sizeof(struct ip6t_error),
22 .hook_entry = {
23 [NF_INET_PRE_ROUTING] = 0,
24 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard)
25 },
26 .underflow = {
27 [NF_INET_PRE_ROUTING] = 0,
28 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard)
29 },
30 },
31 .entries = {
32 IP6T_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
33 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
34 },
35 .term = IP6T_ERROR_INIT, /* ERROR */
36};
37
38static const struct xt_table packet_raw = { 11static const struct xt_table packet_raw = {
39 .name = "raw", 12 .name = "raw",
40 .valid_hooks = RAW_VALID_HOOKS, 13 .valid_hooks = RAW_VALID_HOOKS,
41 .me = THIS_MODULE, 14 .me = THIS_MODULE,
42 .af = NFPROTO_IPV6, 15 .af = NFPROTO_IPV6,
16 .priority = NF_IP6_PRI_FIRST,
43}; 17};
44 18
45/* The work comes in here from netfilter.c. */ 19/* The work comes in here from netfilter.c. */
46static unsigned int 20static unsigned int
47ip6t_pre_routing_hook(unsigned int hook, 21ip6table_raw_hook(unsigned int hook, struct sk_buff *skb,
48 struct sk_buff *skb, 22 const struct net_device *in, const struct net_device *out,
49 const struct net_device *in, 23 int (*okfn)(struct sk_buff *))
50 const struct net_device *out,
51 int (*okfn)(struct sk_buff *))
52{ 24{
53 return ip6t_do_table(skb, hook, in, out, 25 const struct net *net = dev_net((in != NULL) ? in : out);
54 dev_net(in)->ipv6.ip6table_raw);
55}
56 26
57static unsigned int 27 return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_raw);
58ip6t_local_out_hook(unsigned int hook,
59 struct sk_buff *skb,
60 const struct net_device *in,
61 const struct net_device *out,
62 int (*okfn)(struct sk_buff *))
63{
64 return ip6t_do_table(skb, hook, in, out,
65 dev_net(out)->ipv6.ip6table_raw);
66} 28}
67 29
68static struct nf_hook_ops ip6t_ops[] __read_mostly = { 30static struct nf_hook_ops *rawtable_ops __read_mostly;
69 {
70 .hook = ip6t_pre_routing_hook,
71 .pf = NFPROTO_IPV6,
72 .hooknum = NF_INET_PRE_ROUTING,
73 .priority = NF_IP6_PRI_FIRST,
74 .owner = THIS_MODULE,
75 },
76 {
77 .hook = ip6t_local_out_hook,
78 .pf = NFPROTO_IPV6,
79 .hooknum = NF_INET_LOCAL_OUT,
80 .priority = NF_IP6_PRI_FIRST,
81 .owner = THIS_MODULE,
82 },
83};
84 31
85static int __net_init ip6table_raw_net_init(struct net *net) 32static int __net_init ip6table_raw_net_init(struct net *net)
86{ 33{
87 /* Register table */ 34 struct ip6t_replace *repl;
35
36 repl = ip6t_alloc_initial_table(&packet_raw);
37 if (repl == NULL)
38 return -ENOMEM;
88 net->ipv6.ip6table_raw = 39 net->ipv6.ip6table_raw =
89 ip6t_register_table(net, &packet_raw, &initial_table.repl); 40 ip6t_register_table(net, &packet_raw, repl);
41 kfree(repl);
90 if (IS_ERR(net->ipv6.ip6table_raw)) 42 if (IS_ERR(net->ipv6.ip6table_raw))
91 return PTR_ERR(net->ipv6.ip6table_raw); 43 return PTR_ERR(net->ipv6.ip6table_raw);
92 return 0; 44 return 0;
@@ -94,7 +46,7 @@ static int __net_init ip6table_raw_net_init(struct net *net)
94 46
95static void __net_exit ip6table_raw_net_exit(struct net *net) 47static void __net_exit ip6table_raw_net_exit(struct net *net)
96{ 48{
97 ip6t_unregister_table(net->ipv6.ip6table_raw); 49 ip6t_unregister_table(net, net->ipv6.ip6table_raw);
98} 50}
99 51
100static struct pernet_operations ip6table_raw_net_ops = { 52static struct pernet_operations ip6table_raw_net_ops = {
@@ -111,9 +63,11 @@ static int __init ip6table_raw_init(void)
111 return ret; 63 return ret;
112 64
113 /* Register hooks */ 65 /* Register hooks */
114 ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 66 rawtable_ops = xt_hook_link(&packet_raw, ip6table_raw_hook);
115 if (ret < 0) 67 if (IS_ERR(rawtable_ops)) {
68 ret = PTR_ERR(rawtable_ops);
116 goto cleanup_table; 69 goto cleanup_table;
70 }
117 71
118 return ret; 72 return ret;
119 73
@@ -124,7 +78,7 @@ static int __init ip6table_raw_init(void)
124 78
125static void __exit ip6table_raw_fini(void) 79static void __exit ip6table_raw_fini(void)
126{ 80{
127 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 81 xt_hook_unlink(&packet_raw, rawtable_ops);
128 unregister_pernet_subsys(&ip6table_raw_net_ops); 82 unregister_pernet_subsys(&ip6table_raw_net_ops);
129} 83}
130 84
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c
index 41b444c6093..0824d865aa9 100644
--- a/net/ipv6/netfilter/ip6table_security.c
+++ b/net/ipv6/netfilter/ip6table_security.c
@@ -26,106 +26,37 @@ MODULE_DESCRIPTION("ip6tables security table, for MAC rules");
26 (1 << NF_INET_FORWARD) | \ 26 (1 << NF_INET_FORWARD) | \
27 (1 << NF_INET_LOCAL_OUT) 27 (1 << NF_INET_LOCAL_OUT)
28 28
29static const struct
30{
31 struct ip6t_replace repl;
32 struct ip6t_standard entries[3];
33 struct ip6t_error term;
34} initial_table __net_initdata = {
35 .repl = {
36 .name = "security",
37 .valid_hooks = SECURITY_VALID_HOOKS,
38 .num_entries = 4,
39 .size = sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error),
40 .hook_entry = {
41 [NF_INET_LOCAL_IN] = 0,
42 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
43 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2,
44 },
45 .underflow = {
46 [NF_INET_LOCAL_IN] = 0,
47 [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
48 [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2,
49 },
50 },
51 .entries = {
52 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
53 IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
54 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
55 },
56 .term = IP6T_ERROR_INIT, /* ERROR */
57};
58
59static const struct xt_table security_table = { 29static const struct xt_table security_table = {
60 .name = "security", 30 .name = "security",
61 .valid_hooks = SECURITY_VALID_HOOKS, 31 .valid_hooks = SECURITY_VALID_HOOKS,
62 .me = THIS_MODULE, 32 .me = THIS_MODULE,
63 .af = NFPROTO_IPV6, 33 .af = NFPROTO_IPV6,
34 .priority = NF_IP6_PRI_SECURITY,
64}; 35};
65 36
66static unsigned int 37static unsigned int
67ip6t_local_in_hook(unsigned int hook, 38ip6table_security_hook(unsigned int hook, struct sk_buff *skb,
68 struct sk_buff *skb, 39 const struct net_device *in,
69 const struct net_device *in, 40 const struct net_device *out,
70 const struct net_device *out, 41 int (*okfn)(struct sk_buff *))
71 int (*okfn)(struct sk_buff *))
72{
73 return ip6t_do_table(skb, hook, in, out,
74 dev_net(in)->ipv6.ip6table_security);
75}
76
77static unsigned int
78ip6t_forward_hook(unsigned int hook,
79 struct sk_buff *skb,
80 const struct net_device *in,
81 const struct net_device *out,
82 int (*okfn)(struct sk_buff *))
83{ 42{
84 return ip6t_do_table(skb, hook, in, out, 43 const struct net *net = dev_net((in != NULL) ? in : out);
85 dev_net(in)->ipv6.ip6table_security);
86}
87 44
88static unsigned int 45 return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_security);
89ip6t_local_out_hook(unsigned int hook,
90 struct sk_buff *skb,
91 const struct net_device *in,
92 const struct net_device *out,
93 int (*okfn)(struct sk_buff *))
94{
95 /* TBD: handle short packets via raw socket */
96 return ip6t_do_table(skb, hook, in, out,
97 dev_net(out)->ipv6.ip6table_security);
98} 46}
99 47
100static struct nf_hook_ops ip6t_ops[] __read_mostly = { 48static struct nf_hook_ops *sectbl_ops __read_mostly;
101 {
102 .hook = ip6t_local_in_hook,
103 .owner = THIS_MODULE,
104 .pf = NFPROTO_IPV6,
105 .hooknum = NF_INET_LOCAL_IN,
106 .priority = NF_IP6_PRI_SECURITY,
107 },
108 {
109 .hook = ip6t_forward_hook,
110 .owner = THIS_MODULE,
111 .pf = NFPROTO_IPV6,
112 .hooknum = NF_INET_FORWARD,
113 .priority = NF_IP6_PRI_SECURITY,
114 },
115 {
116 .hook = ip6t_local_out_hook,
117 .owner = THIS_MODULE,
118 .pf = NFPROTO_IPV6,
119 .hooknum = NF_INET_LOCAL_OUT,
120 .priority = NF_IP6_PRI_SECURITY,
121 },
122};
123 49
124static int __net_init ip6table_security_net_init(struct net *net) 50static int __net_init ip6table_security_net_init(struct net *net)
125{ 51{
126 net->ipv6.ip6table_security = 52 struct ip6t_replace *repl;
127 ip6t_register_table(net, &security_table, &initial_table.repl);
128 53
54 repl = ip6t_alloc_initial_table(&security_table);
55 if (repl == NULL)
56 return -ENOMEM;
57 net->ipv6.ip6table_security =
58 ip6t_register_table(net, &security_table, repl);
59 kfree(repl);
129 if (IS_ERR(net->ipv6.ip6table_security)) 60 if (IS_ERR(net->ipv6.ip6table_security))
130 return PTR_ERR(net->ipv6.ip6table_security); 61 return PTR_ERR(net->ipv6.ip6table_security);
131 62
@@ -134,7 +65,7 @@ static int __net_init ip6table_security_net_init(struct net *net)
134 65
135static void __net_exit ip6table_security_net_exit(struct net *net) 66static void __net_exit ip6table_security_net_exit(struct net *net)
136{ 67{
137 ip6t_unregister_table(net->ipv6.ip6table_security); 68 ip6t_unregister_table(net, net->ipv6.ip6table_security);
138} 69}
139 70
140static struct pernet_operations ip6table_security_net_ops = { 71static struct pernet_operations ip6table_security_net_ops = {
@@ -150,9 +81,11 @@ static int __init ip6table_security_init(void)
150 if (ret < 0) 81 if (ret < 0)
151 return ret; 82 return ret;
152 83
153 ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 84 sectbl_ops = xt_hook_link(&security_table, ip6table_security_hook);
154 if (ret < 0) 85 if (IS_ERR(sectbl_ops)) {
86 ret = PTR_ERR(sectbl_ops);
155 goto cleanup_table; 87 goto cleanup_table;
88 }
156 89
157 return ret; 90 return ret;
158 91
@@ -163,7 +96,7 @@ cleanup_table:
163 96
164static void __exit ip6table_security_fini(void) 97static void __exit ip6table_security_fini(void)
165{ 98{
166 nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops)); 99 xt_hook_unlink(&security_table, sectbl_ops);
167 unregister_pernet_subsys(&ip6table_security_net_ops); 100 unregister_pernet_subsys(&ip6table_security_net_ops);
168} 101}
169 102
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 0956ebabbff..996c3f41fec 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -27,6 +27,7 @@
27#include <net/netfilter/nf_conntrack_l4proto.h> 27#include <net/netfilter/nf_conntrack_l4proto.h>
28#include <net/netfilter/nf_conntrack_l3proto.h> 28#include <net/netfilter/nf_conntrack_l3proto.h>
29#include <net/netfilter/nf_conntrack_core.h> 29#include <net/netfilter/nf_conntrack_core.h>
30#include <net/netfilter/nf_conntrack_zones.h>
30#include <net/netfilter/ipv6/nf_conntrack_ipv6.h> 31#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
31#include <net/netfilter/nf_log.h> 32#include <net/netfilter/nf_log.h>
32 33
@@ -191,15 +192,20 @@ out:
191static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, 192static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
192 struct sk_buff *skb) 193 struct sk_buff *skb)
193{ 194{
195 u16 zone = NF_CT_DEFAULT_ZONE;
196
197 if (skb->nfct)
198 zone = nf_ct_zone((struct nf_conn *)skb->nfct);
199
194#ifdef CONFIG_BRIDGE_NETFILTER 200#ifdef CONFIG_BRIDGE_NETFILTER
195 if (skb->nf_bridge && 201 if (skb->nf_bridge &&
196 skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) 202 skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
197 return IP6_DEFRAG_CONNTRACK_BRIDGE_IN; 203 return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
198#endif 204#endif
199 if (hooknum == NF_INET_PRE_ROUTING) 205 if (hooknum == NF_INET_PRE_ROUTING)
200 return IP6_DEFRAG_CONNTRACK_IN; 206 return IP6_DEFRAG_CONNTRACK_IN + zone;
201 else 207 else
202 return IP6_DEFRAG_CONNTRACK_OUT; 208 return IP6_DEFRAG_CONNTRACK_OUT + zone;
203 209
204} 210}
205 211
@@ -212,7 +218,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
212 struct sk_buff *reasm; 218 struct sk_buff *reasm;
213 219
214 /* Previously seen (loopback)? */ 220 /* Previously seen (loopback)? */
215 if (skb->nfct) 221 if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
216 return NF_ACCEPT; 222 return NF_ACCEPT;
217 223
218 reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb)); 224 reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index c7b8bd1d798..9be81776415 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -23,6 +23,7 @@
23#include <net/netfilter/nf_conntrack_tuple.h> 23#include <net/netfilter/nf_conntrack_tuple.h>
24#include <net/netfilter/nf_conntrack_l4proto.h> 24#include <net/netfilter/nf_conntrack_l4proto.h>
25#include <net/netfilter/nf_conntrack_core.h> 25#include <net/netfilter/nf_conntrack_core.h>
26#include <net/netfilter/nf_conntrack_zones.h>
26#include <net/netfilter/ipv6/nf_conntrack_icmpv6.h> 27#include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
27#include <net/netfilter/nf_log.h> 28#include <net/netfilter/nf_log.h>
28 29
@@ -128,7 +129,7 @@ static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb,
128} 129}
129 130
130static int 131static int
131icmpv6_error_message(struct net *net, 132icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
132 struct sk_buff *skb, 133 struct sk_buff *skb,
133 unsigned int icmp6off, 134 unsigned int icmp6off,
134 enum ip_conntrack_info *ctinfo, 135 enum ip_conntrack_info *ctinfo,
@@ -137,6 +138,7 @@ icmpv6_error_message(struct net *net,
137 struct nf_conntrack_tuple intuple, origtuple; 138 struct nf_conntrack_tuple intuple, origtuple;
138 const struct nf_conntrack_tuple_hash *h; 139 const struct nf_conntrack_tuple_hash *h;
139 const struct nf_conntrack_l4proto *inproto; 140 const struct nf_conntrack_l4proto *inproto;
141 u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
140 142
141 NF_CT_ASSERT(skb->nfct == NULL); 143 NF_CT_ASSERT(skb->nfct == NULL);
142 144
@@ -163,7 +165,7 @@ icmpv6_error_message(struct net *net,
163 165
164 *ctinfo = IP_CT_RELATED; 166 *ctinfo = IP_CT_RELATED;
165 167
166 h = nf_conntrack_find_get(net, &intuple); 168 h = nf_conntrack_find_get(net, zone, &intuple);
167 if (!h) { 169 if (!h) {
168 pr_debug("icmpv6_error: no match\n"); 170 pr_debug("icmpv6_error: no match\n");
169 return -NF_ACCEPT; 171 return -NF_ACCEPT;
@@ -179,7 +181,8 @@ icmpv6_error_message(struct net *net,
179} 181}
180 182
181static int 183static int
182icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff, 184icmpv6_error(struct net *net, struct nf_conn *tmpl,
185 struct sk_buff *skb, unsigned int dataoff,
183 enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) 186 enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum)
184{ 187{
185 const struct icmp6hdr *icmp6h; 188 const struct icmp6hdr *icmp6h;
@@ -215,7 +218,7 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
215 if (icmp6h->icmp6_type >= 128) 218 if (icmp6h->icmp6_type >= 128)
216 return NF_ACCEPT; 219 return NF_ACCEPT;
217 220
218 return icmpv6_error_message(net, skb, dataoff, ctinfo, hooknum); 221 return icmpv6_error_message(net, tmpl, skb, dataoff, ctinfo, hooknum);
219} 222}
220 223
221#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 224#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 624a54832a7..f1171b74465 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -45,9 +45,6 @@
45#include <linux/kernel.h> 45#include <linux/kernel.h>
46#include <linux/module.h> 46#include <linux/module.h>
47 47
48#define NF_CT_FRAG6_HIGH_THRESH 262144 /* == 256*1024 */
49#define NF_CT_FRAG6_LOW_THRESH 196608 /* == 192*1024 */
50#define NF_CT_FRAG6_TIMEOUT IPV6_FRAG_TIMEOUT
51 48
52struct nf_ct_frag6_skb_cb 49struct nf_ct_frag6_skb_cb
53{ 50{
@@ -472,7 +469,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
472 469
473 /* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */ 470 /* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */
474 fp = skb_shinfo(head)->frag_list; 471 fp = skb_shinfo(head)->frag_list;
475 if (NFCT_FRAG6_CB(fp)->orig == NULL) 472 if (fp && NFCT_FRAG6_CB(fp)->orig == NULL)
476 /* at above code, head skb is divided into two skbs. */ 473 /* at above code, head skb is divided into two skbs. */
477 fp = fp->next; 474 fp = fp->next;
478 475
@@ -598,12 +595,6 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
598 hdr = ipv6_hdr(clone); 595 hdr = ipv6_hdr(clone);
599 fhdr = (struct frag_hdr *)skb_transport_header(clone); 596 fhdr = (struct frag_hdr *)skb_transport_header(clone);
600 597
601 if (!(fhdr->frag_off & htons(0xFFF9))) {
602 pr_debug("Invalid fragment offset\n");
603 /* It is not a fragmented frame */
604 goto ret_orig;
605 }
606
607 if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh) 598 if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
608 nf_ct_frag6_evictor(); 599 nf_ct_frag6_evictor();
609 600
@@ -670,8 +661,8 @@ int nf_ct_frag6_init(void)
670 nf_frags.frag_expire = nf_ct_frag6_expire; 661 nf_frags.frag_expire = nf_ct_frag6_expire;
671 nf_frags.secret_interval = 10 * 60 * HZ; 662 nf_frags.secret_interval = 10 * 60 * HZ;
672 nf_init_frags.timeout = IPV6_FRAG_TIMEOUT; 663 nf_init_frags.timeout = IPV6_FRAG_TIMEOUT;
673 nf_init_frags.high_thresh = 256 * 1024; 664 nf_init_frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
674 nf_init_frags.low_thresh = 192 * 1024; 665 nf_init_frags.low_thresh = IPV6_FRAG_LOW_THRESH;
675 inet_frags_init_net(&nf_init_frags); 666 inet_frags_init_net(&nf_init_frags);
676 inet_frags_init(&nf_frags); 667 inet_frags_init(&nf_frags);
677 668
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index c9605c3ad91..58344c0fbd1 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -59,7 +59,7 @@ static const struct file_operations sockstat6_seq_fops = {
59 .release = single_release_net, 59 .release = single_release_net,
60}; 60};
61 61
62static struct snmp_mib snmp6_ipstats_list[] = { 62static const struct snmp_mib snmp6_ipstats_list[] = {
63/* ipv6 mib according to RFC 2465 */ 63/* ipv6 mib according to RFC 2465 */
64 SNMP_MIB_ITEM("Ip6InReceives", IPSTATS_MIB_INPKTS), 64 SNMP_MIB_ITEM("Ip6InReceives", IPSTATS_MIB_INPKTS),
65 SNMP_MIB_ITEM("Ip6InHdrErrors", IPSTATS_MIB_INHDRERRORS), 65 SNMP_MIB_ITEM("Ip6InHdrErrors", IPSTATS_MIB_INHDRERRORS),
@@ -92,7 +92,7 @@ static struct snmp_mib snmp6_ipstats_list[] = {
92 SNMP_MIB_SENTINEL 92 SNMP_MIB_SENTINEL
93}; 93};
94 94
95static struct snmp_mib snmp6_icmp6_list[] = { 95static const struct snmp_mib snmp6_icmp6_list[] = {
96/* icmpv6 mib according to RFC 2466 */ 96/* icmpv6 mib according to RFC 2466 */
97 SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS), 97 SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS),
98 SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS), 98 SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS),
@@ -120,7 +120,7 @@ static const char *const icmp6type2name[256] = {
120}; 120};
121 121
122 122
123static struct snmp_mib snmp6_udp6_list[] = { 123static const struct snmp_mib snmp6_udp6_list[] = {
124 SNMP_MIB_ITEM("Udp6InDatagrams", UDP_MIB_INDATAGRAMS), 124 SNMP_MIB_ITEM("Udp6InDatagrams", UDP_MIB_INDATAGRAMS),
125 SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS), 125 SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS),
126 SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS), 126 SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS),
@@ -128,7 +128,7 @@ static struct snmp_mib snmp6_udp6_list[] = {
128 SNMP_MIB_SENTINEL 128 SNMP_MIB_SENTINEL
129}; 129};
130 130
131static struct snmp_mib snmp6_udplite6_list[] = { 131static const struct snmp_mib snmp6_udplite6_list[] = {
132 SNMP_MIB_ITEM("UdpLite6InDatagrams", UDP_MIB_INDATAGRAMS), 132 SNMP_MIB_ITEM("UdpLite6InDatagrams", UDP_MIB_INDATAGRAMS),
133 SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS), 133 SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS),
134 SNMP_MIB_ITEM("UdpLite6InErrors", UDP_MIB_INERRORS), 134 SNMP_MIB_ITEM("UdpLite6InErrors", UDP_MIB_INERRORS),
@@ -136,7 +136,7 @@ static struct snmp_mib snmp6_udplite6_list[] = {
136 SNMP_MIB_SENTINEL 136 SNMP_MIB_SENTINEL
137}; 137};
138 138
139static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib) 139static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **mib)
140{ 140{
141 char name[32]; 141 char name[32];
142 int i; 142 int i;
@@ -170,8 +170,8 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib)
170 return; 170 return;
171} 171}
172 172
173static inline void 173static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **mib,
174snmp6_seq_show_item(struct seq_file *seq, void **mib, struct snmp_mib *itemlist) 174 const struct snmp_mib *itemlist)
175{ 175{
176 int i; 176 int i;
177 for (i=0; itemlist[i].name; i++) 177 for (i=0; itemlist[i].name; i++)
@@ -183,14 +183,15 @@ static int snmp6_seq_show(struct seq_file *seq, void *v)
183{ 183{
184 struct net *net = (struct net *)seq->private; 184 struct net *net = (struct net *)seq->private;
185 185
186 snmp6_seq_show_item(seq, (void **)net->mib.ipv6_statistics, 186 snmp6_seq_show_item(seq, (void __percpu **)net->mib.ipv6_statistics,
187 snmp6_ipstats_list); 187 snmp6_ipstats_list);
188 snmp6_seq_show_item(seq, (void **)net->mib.icmpv6_statistics, 188 snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics,
189 snmp6_icmp6_list); 189 snmp6_icmp6_list);
190 snmp6_seq_show_icmpv6msg(seq, (void **)net->mib.icmpv6msg_statistics); 190 snmp6_seq_show_icmpv6msg(seq,
191 snmp6_seq_show_item(seq, (void **)net->mib.udp_stats_in6, 191 (void __percpu **)net->mib.icmpv6msg_statistics);
192 snmp6_seq_show_item(seq, (void __percpu **)net->mib.udp_stats_in6,
192 snmp6_udp6_list); 193 snmp6_udp6_list);
193 snmp6_seq_show_item(seq, (void **)net->mib.udplite_stats_in6, 194 snmp6_seq_show_item(seq, (void __percpu **)net->mib.udplite_stats_in6,
194 snmp6_udplite6_list); 195 snmp6_udplite6_list);
195 return 0; 196 return 0;
196} 197}
@@ -213,9 +214,11 @@ static int snmp6_dev_seq_show(struct seq_file *seq, void *v)
213 struct inet6_dev *idev = (struct inet6_dev *)seq->private; 214 struct inet6_dev *idev = (struct inet6_dev *)seq->private;
214 215
215 seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex); 216 seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex);
216 snmp6_seq_show_item(seq, (void **)idev->stats.ipv6, snmp6_ipstats_list); 217 snmp6_seq_show_item(seq, (void __percpu **)idev->stats.ipv6,
217 snmp6_seq_show_item(seq, (void **)idev->stats.icmpv6, snmp6_icmp6_list); 218 snmp6_ipstats_list);
218 snmp6_seq_show_icmpv6msg(seq, (void **)idev->stats.icmpv6msg); 219 snmp6_seq_show_item(seq, (void __percpu **)idev->stats.icmpv6,
220 snmp6_icmp6_list);
221 snmp6_seq_show_icmpv6msg(seq, (void __percpu **)idev->stats.icmpv6msg);
219 return 0; 222 return 0;
220} 223}
221 224
@@ -259,7 +262,7 @@ int snmp6_unregister_dev(struct inet6_dev *idev)
259 struct net *net = dev_net(idev->dev); 262 struct net *net = dev_net(idev->dev);
260 if (!net->mib.proc_net_devsnmp6) 263 if (!net->mib.proc_net_devsnmp6)
261 return -ENOENT; 264 return -ENOENT;
262 if (!idev || !idev->stats.proc_dir_entry) 265 if (!idev->stats.proc_dir_entry)
263 return -EINVAL; 266 return -EINVAL;
264 remove_proc_entry(idev->stats.proc_dir_entry->name, 267 remove_proc_entry(idev->stats.proc_dir_entry->name,
265 net->mib.proc_net_devsnmp6); 268 net->mib.proc_net_devsnmp6);
@@ -267,7 +270,7 @@ int snmp6_unregister_dev(struct inet6_dev *idev)
267 return 0; 270 return 0;
268} 271}
269 272
270static int ipv6_proc_init_net(struct net *net) 273static int __net_init ipv6_proc_init_net(struct net *net)
271{ 274{
272 if (!proc_net_fops_create(net, "sockstat6", S_IRUGO, 275 if (!proc_net_fops_create(net, "sockstat6", S_IRUGO,
273 &sockstat6_seq_fops)) 276 &sockstat6_seq_fops))
@@ -288,7 +291,7 @@ proc_dev_snmp6_fail:
288 return -ENOMEM; 291 return -ENOMEM;
289} 292}
290 293
291static void ipv6_proc_exit_net(struct net *net) 294static void __net_exit ipv6_proc_exit_net(struct net *net)
292{ 295{
293 proc_net_remove(net, "sockstat6"); 296 proc_net_remove(net, "sockstat6");
294 proc_net_remove(net, "dev_snmp6"); 297 proc_net_remove(net, "dev_snmp6");
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 926ce8eeffa..ed31c37c6e3 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1275,7 +1275,7 @@ static const struct file_operations raw6_seq_fops = {
1275 .release = seq_release_net, 1275 .release = seq_release_net,
1276}; 1276};
1277 1277
1278static int raw6_init_net(struct net *net) 1278static int __net_init raw6_init_net(struct net *net)
1279{ 1279{
1280 if (!proc_net_fops_create(net, "raw6", S_IRUGO, &raw6_seq_fops)) 1280 if (!proc_net_fops_create(net, "raw6", S_IRUGO, &raw6_seq_fops))
1281 return -ENOMEM; 1281 return -ENOMEM;
@@ -1283,7 +1283,7 @@ static int raw6_init_net(struct net *net)
1283 return 0; 1283 return 0;
1284} 1284}
1285 1285
1286static void raw6_exit_net(struct net *net) 1286static void __net_exit raw6_exit_net(struct net *net)
1287{ 1287{
1288 proc_net_remove(net, "raw6"); 1288 proc_net_remove(net, "raw6");
1289} 1289}
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 2cddea3bd6b..a555156e977 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -228,7 +228,7 @@ static void ip6_frag_expire(unsigned long data)
228 pointer directly, device might already disappeared. 228 pointer directly, device might already disappeared.
229 */ 229 */
230 fq->q.fragments->dev = dev; 230 fq->q.fragments->dev = dev;
231 icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev); 231 icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
232out_rcu_unlock: 232out_rcu_unlock:
233 rcu_read_unlock(); 233 rcu_read_unlock();
234out: 234out:
@@ -237,8 +237,7 @@ out:
237} 237}
238 238
239static __inline__ struct frag_queue * 239static __inline__ struct frag_queue *
240fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst, 240fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst)
241 struct inet6_dev *idev)
242{ 241{
243 struct inet_frag_queue *q; 242 struct inet_frag_queue *q;
244 struct ip6_create_arg arg; 243 struct ip6_create_arg arg;
@@ -254,13 +253,9 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
254 253
255 q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash); 254 q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
256 if (q == NULL) 255 if (q == NULL)
257 goto oom; 256 return NULL;
258 257
259 return container_of(q, struct frag_queue, q); 258 return container_of(q, struct frag_queue, q);
260
261oom:
262 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS);
263 return NULL;
264} 259}
265 260
266static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, 261static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
@@ -606,8 +601,8 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
606 if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh) 601 if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
607 ip6_evictor(net, ip6_dst_idev(skb_dst(skb))); 602 ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));
608 603
609 if ((fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr, 604 fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
610 ip6_dst_idev(skb_dst(skb)))) != NULL) { 605 if (fq != NULL) {
611 int ret; 606 int ret;
612 607
613 spin_lock(&fq->q.lock); 608 spin_lock(&fq->q.lock);
@@ -672,7 +667,7 @@ static struct ctl_table ip6_frags_ctl_table[] = {
672 { } 667 { }
673}; 668};
674 669
675static int ip6_frags_ns_sysctl_register(struct net *net) 670static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
676{ 671{
677 struct ctl_table *table; 672 struct ctl_table *table;
678 struct ctl_table_header *hdr; 673 struct ctl_table_header *hdr;
@@ -702,7 +697,7 @@ err_alloc:
702 return -ENOMEM; 697 return -ENOMEM;
703} 698}
704 699
705static void ip6_frags_ns_sysctl_unregister(struct net *net) 700static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
706{ 701{
707 struct ctl_table *table; 702 struct ctl_table *table;
708 703
@@ -745,10 +740,10 @@ static inline void ip6_frags_sysctl_unregister(void)
745} 740}
746#endif 741#endif
747 742
748static int ipv6_frags_init_net(struct net *net) 743static int __net_init ipv6_frags_init_net(struct net *net)
749{ 744{
750 net->ipv6.frags.high_thresh = 256 * 1024; 745 net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
751 net->ipv6.frags.low_thresh = 192 * 1024; 746 net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
752 net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT; 747 net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
753 748
754 inet_frags_init_net(&net->ipv6.frags); 749 inet_frags_init_net(&net->ipv6.frags);
@@ -756,7 +751,7 @@ static int ipv6_frags_init_net(struct net *net)
756 return ip6_frags_ns_sysctl_register(net); 751 return ip6_frags_ns_sysctl_register(net);
757} 752}
758 753
759static void ipv6_frags_exit_net(struct net *net) 754static void __net_exit ipv6_frags_exit_net(struct net *net)
760{ 755{
761 ip6_frags_ns_sysctl_unregister(net); 756 ip6_frags_ns_sysctl_unregister(net);
762 inet_frags_exit_net(&net->ipv6.frags, &ip6_frags); 757 inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c2bd74c5f8d..88c0a5c49ae 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -909,7 +909,7 @@ static void ip6_link_failure(struct sk_buff *skb)
909{ 909{
910 struct rt6_info *rt; 910 struct rt6_info *rt;
911 911
912 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev); 912 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
913 913
914 rt = (struct rt6_info *) skb_dst(skb); 914 rt = (struct rt6_info *) skb_dst(skb);
915 if (rt) { 915 if (rt) {
@@ -1884,7 +1884,7 @@ static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
1884 ipstats_mib_noroutes); 1884 ipstats_mib_noroutes);
1885 break; 1885 break;
1886 } 1886 }
1887 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0, skb->dev); 1887 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
1888 kfree_skb(skb); 1888 kfree_skb(skb);
1889 return 0; 1889 return 0;
1890} 1890}
@@ -2612,7 +2612,7 @@ ctl_table ipv6_route_table_template[] = {
2612 { } 2612 { }
2613}; 2613};
2614 2614
2615struct ctl_table *ipv6_route_sysctl_init(struct net *net) 2615struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2616{ 2616{
2617 struct ctl_table *table; 2617 struct ctl_table *table;
2618 2618
@@ -2637,7 +2637,7 @@ struct ctl_table *ipv6_route_sysctl_init(struct net *net)
2637} 2637}
2638#endif 2638#endif
2639 2639
2640static int ip6_route_net_init(struct net *net) 2640static int __net_init ip6_route_net_init(struct net *net)
2641{ 2641{
2642 int ret = -ENOMEM; 2642 int ret = -ENOMEM;
2643 2643
@@ -2702,7 +2702,7 @@ out_ip6_dst_ops:
2702 goto out; 2702 goto out;
2703} 2703}
2704 2704
2705static void ip6_route_net_exit(struct net *net) 2705static void __net_exit ip6_route_net_exit(struct net *net)
2706{ 2706{
2707#ifdef CONFIG_PROC_FS 2707#ifdef CONFIG_PROC_FS
2708 proc_net_remove(net, "ipv6_route"); 2708 proc_net_remove(net, "ipv6_route");
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 976e68244b9..b1eea811be4 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -62,7 +62,6 @@
62#define HASH_SIZE 16 62#define HASH_SIZE 16
63#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) 63#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
64 64
65static void ipip6_fb_tunnel_init(struct net_device *dev);
66static void ipip6_tunnel_init(struct net_device *dev); 65static void ipip6_tunnel_init(struct net_device *dev);
67static void ipip6_tunnel_setup(struct net_device *dev); 66static void ipip6_tunnel_setup(struct net_device *dev);
68 67
@@ -364,7 +363,6 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
364 goto out; 363 goto out;
365 } 364 }
366 365
367 INIT_RCU_HEAD(&p->rcu_head);
368 p->next = t->prl; 366 p->next = t->prl;
369 p->addr = a->addr; 367 p->addr = a->addr;
370 p->flags = a->flags; 368 p->flags = a->flags;
@@ -745,7 +743,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
745 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); 743 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
746 744
747 if (skb->len > mtu) { 745 if (skb->len > mtu) {
748 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); 746 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
749 ip_rt_put(rt); 747 ip_rt_put(rt);
750 goto tx_error; 748 goto tx_error;
751 } 749 }
@@ -1120,7 +1118,7 @@ static void ipip6_tunnel_init(struct net_device *dev)
1120 ipip6_tunnel_bind_dev(dev); 1118 ipip6_tunnel_bind_dev(dev);
1121} 1119}
1122 1120
1123static void ipip6_fb_tunnel_init(struct net_device *dev) 1121static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
1124{ 1122{
1125 struct ip_tunnel *tunnel = netdev_priv(dev); 1123 struct ip_tunnel *tunnel = netdev_priv(dev);
1126 struct iphdr *iph = &tunnel->parms.iph; 1124 struct iphdr *iph = &tunnel->parms.iph;
@@ -1145,7 +1143,7 @@ static struct xfrm_tunnel sit_handler = {
1145 .priority = 1, 1143 .priority = 1,
1146}; 1144};
1147 1145
1148static void sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head) 1146static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
1149{ 1147{
1150 int prio; 1148 int prio;
1151 1149
@@ -1162,7 +1160,7 @@ static void sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
1162 } 1160 }
1163} 1161}
1164 1162
1165static int sit_init_net(struct net *net) 1163static int __net_init sit_init_net(struct net *net)
1166{ 1164{
1167 struct sit_net *sitn = net_generic(net, sit_net_id); 1165 struct sit_net *sitn = net_generic(net, sit_net_id);
1168 int err; 1166 int err;
@@ -1195,7 +1193,7 @@ err_alloc_dev:
1195 return err; 1193 return err;
1196} 1194}
1197 1195
1198static void sit_exit_net(struct net *net) 1196static void __net_exit sit_exit_net(struct net *net)
1199{ 1197{
1200 struct sit_net *sitn = net_generic(net, sit_net_id); 1198 struct sit_net *sitn = net_generic(net, sit_net_id);
1201 LIST_HEAD(list); 1199 LIST_HEAD(list);
@@ -1228,15 +1226,14 @@ static int __init sit_init(void)
1228 1226
1229 printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n"); 1227 printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n");
1230 1228
1231 if (xfrm4_tunnel_register(&sit_handler, AF_INET6) < 0) {
1232 printk(KERN_INFO "sit init: Can't add protocol\n");
1233 return -EAGAIN;
1234 }
1235
1236 err = register_pernet_device(&sit_net_ops); 1229 err = register_pernet_device(&sit_net_ops);
1237 if (err < 0) 1230 if (err < 0)
1238 xfrm4_tunnel_deregister(&sit_handler, AF_INET6); 1231 return err;
1239 1232 err = xfrm4_tunnel_register(&sit_handler, AF_INET6);
1233 if (err < 0) {
1234 unregister_pernet_device(&sit_net_ops);
1235 printk(KERN_INFO "sit init: Can't add protocol\n");
1236 }
1240 return err; 1237 return err;
1241} 1238}
1242 1239
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 7208a06576c..34d1f0690d7 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -269,7 +269,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
269 req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); 269 req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
270 tcp_select_initial_window(tcp_full_space(sk), req->mss, 270 tcp_select_initial_window(tcp_full_space(sk), req->mss,
271 &req->rcv_wnd, &req->window_clamp, 271 &req->rcv_wnd, &req->window_clamp,
272 ireq->wscale_ok, &rcv_wscale); 272 ireq->wscale_ok, &rcv_wscale,
273 dst_metric(dst, RTAX_INITRWND));
273 274
274 ireq->rcv_wscale = rcv_wscale; 275 ireq->rcv_wscale = rcv_wscale;
275 276
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index c690736885b..f841d93bf98 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -55,7 +55,7 @@ struct ctl_path net_ipv6_ctl_path[] = {
55}; 55};
56EXPORT_SYMBOL_GPL(net_ipv6_ctl_path); 56EXPORT_SYMBOL_GPL(net_ipv6_ctl_path);
57 57
58static int ipv6_sysctl_net_init(struct net *net) 58static int __net_init ipv6_sysctl_net_init(struct net *net)
59{ 59{
60 struct ctl_table *ipv6_table; 60 struct ctl_table *ipv6_table;
61 struct ctl_table *ipv6_route_table; 61 struct ctl_table *ipv6_route_table;
@@ -98,7 +98,7 @@ out_ipv6_table:
98 goto out; 98 goto out;
99} 99}
100 100
101static void ipv6_sysctl_net_exit(struct net *net) 101static void __net_exit ipv6_sysctl_net_exit(struct net *net)
102{ 102{
103 struct ctl_table *ipv6_table; 103 struct ctl_table *ipv6_table;
104 struct ctl_table *ipv6_route_table; 104 struct ctl_table *ipv6_route_table;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index febfd595a40..6963a6b6763 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -520,6 +520,13 @@ done:
520 return err; 520 return err;
521} 521}
522 522
523static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
524 struct request_values *rvp)
525{
526 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
527 return tcp_v6_send_synack(sk, req, rvp);
528}
529
523static inline void syn_flood_warning(struct sk_buff *skb) 530static inline void syn_flood_warning(struct sk_buff *skb)
524{ 531{
525#ifdef CONFIG_SYN_COOKIES 532#ifdef CONFIG_SYN_COOKIES
@@ -876,7 +883,7 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
876 883
877 if (genhash || memcmp(hash_location, newhash, 16) != 0) { 884 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
878 if (net_ratelimit()) { 885 if (net_ratelimit()) {
879 printk(KERN_INFO "MD5 Hash %s for (%pI6, %u)->(%pI6, %u)\n", 886 printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
880 genhash ? "failed" : "mismatch", 887 genhash ? "failed" : "mismatch",
881 &ip6h->saddr, ntohs(th->source), 888 &ip6h->saddr, ntohs(th->source),
882 &ip6h->daddr, ntohs(th->dest)); 889 &ip6h->daddr, ntohs(th->dest));
@@ -890,10 +897,11 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
890struct request_sock_ops tcp6_request_sock_ops __read_mostly = { 897struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
891 .family = AF_INET6, 898 .family = AF_INET6,
892 .obj_size = sizeof(struct tcp6_request_sock), 899 .obj_size = sizeof(struct tcp6_request_sock),
893 .rtx_syn_ack = tcp_v6_send_synack, 900 .rtx_syn_ack = tcp_v6_rtx_synack,
894 .send_ack = tcp_v6_reqsk_send_ack, 901 .send_ack = tcp_v6_reqsk_send_ack,
895 .destructor = tcp_v6_reqsk_destructor, 902 .destructor = tcp_v6_reqsk_destructor,
896 .send_reset = tcp_v6_send_reset 903 .send_reset = tcp_v6_send_reset,
904 .syn_ack_timeout = tcp_syn_ack_timeout,
897}; 905};
898 906
899#ifdef CONFIG_TCP_MD5SIG 907#ifdef CONFIG_TCP_MD5SIG
@@ -2105,7 +2113,7 @@ static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2105 }, 2113 },
2106}; 2114};
2107 2115
2108int tcp6_proc_init(struct net *net) 2116int __net_init tcp6_proc_init(struct net *net)
2109{ 2117{
2110 return tcp_proc_register(net, &tcp6_seq_afinfo); 2118 return tcp_proc_register(net, &tcp6_seq_afinfo);
2111} 2119}
@@ -2174,18 +2182,18 @@ static struct inet_protosw tcpv6_protosw = {
2174 INET_PROTOSW_ICSK, 2182 INET_PROTOSW_ICSK,
2175}; 2183};
2176 2184
2177static int tcpv6_net_init(struct net *net) 2185static int __net_init tcpv6_net_init(struct net *net)
2178{ 2186{
2179 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6, 2187 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2180 SOCK_RAW, IPPROTO_TCP, net); 2188 SOCK_RAW, IPPROTO_TCP, net);
2181} 2189}
2182 2190
2183static void tcpv6_net_exit(struct net *net) 2191static void __net_exit tcpv6_net_exit(struct net *net)
2184{ 2192{
2185 inet_ctl_sock_destroy(net->ipv6.tcp_sk); 2193 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2186} 2194}
2187 2195
2188static void tcpv6_net_exit_batch(struct list_head *net_exit_list) 2196static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2189{ 2197{
2190 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6); 2198 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2191} 2199}
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index 51e2832d13a..e17bc1dfc1a 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -98,7 +98,7 @@ static int tunnel6_rcv(struct sk_buff *skb)
98 if (!handler->handler(skb)) 98 if (!handler->handler(skb))
99 return 0; 99 return 0;
100 100
101 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, skb->dev); 101 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
102 102
103drop: 103drop:
104 kfree_skb(skb); 104 kfree_skb(skb);
@@ -116,7 +116,7 @@ static int tunnel46_rcv(struct sk_buff *skb)
116 if (!handler->handler(skb)) 116 if (!handler->handler(skb))
117 return 0; 117 return 0;
118 118
119 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, skb->dev); 119 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
120 120
121drop: 121drop:
122 kfree_skb(skb); 122 kfree_skb(skb);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 69ebdbe78c4..52b8347ae3b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -322,7 +322,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
322 struct ipv6_pinfo *np = inet6_sk(sk); 322 struct ipv6_pinfo *np = inet6_sk(sk);
323 struct inet_sock *inet = inet_sk(sk); 323 struct inet_sock *inet = inet_sk(sk);
324 struct sk_buff *skb; 324 struct sk_buff *skb;
325 unsigned int ulen, copied; 325 unsigned int ulen;
326 int peeked; 326 int peeked;
327 int err; 327 int err;
328 int is_udplite = IS_UDPLITE(sk); 328 int is_udplite = IS_UDPLITE(sk);
@@ -341,10 +341,9 @@ try_again:
341 goto out; 341 goto out;
342 342
343 ulen = skb->len - sizeof(struct udphdr); 343 ulen = skb->len - sizeof(struct udphdr);
344 copied = len; 344 if (len > ulen)
345 if (copied > ulen) 345 len = ulen;
346 copied = ulen; 346 else if (len < ulen)
347 else if (copied < ulen)
348 msg->msg_flags |= MSG_TRUNC; 347 msg->msg_flags |= MSG_TRUNC;
349 348
350 is_udp4 = (skb->protocol == htons(ETH_P_IP)); 349 is_udp4 = (skb->protocol == htons(ETH_P_IP));
@@ -355,14 +354,14 @@ try_again:
355 * coverage checksum (UDP-Lite), do it before the copy. 354 * coverage checksum (UDP-Lite), do it before the copy.
356 */ 355 */
357 356
358 if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { 357 if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
359 if (udp_lib_checksum_complete(skb)) 358 if (udp_lib_checksum_complete(skb))
360 goto csum_copy_err; 359 goto csum_copy_err;
361 } 360 }
362 361
363 if (skb_csum_unnecessary(skb)) 362 if (skb_csum_unnecessary(skb))
364 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), 363 err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
365 msg->msg_iov, copied ); 364 msg->msg_iov,len);
366 else { 365 else {
367 err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); 366 err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
368 if (err == -EINVAL) 367 if (err == -EINVAL)
@@ -411,7 +410,7 @@ try_again:
411 datagram_recv_ctl(sk, msg, skb); 410 datagram_recv_ctl(sk, msg, skb);
412 } 411 }
413 412
414 err = copied; 413 err = len;
415 if (flags & MSG_TRUNC) 414 if (flags & MSG_TRUNC)
416 err = ulen; 415 err = ulen;
417 416
@@ -681,12 +680,11 @@ static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh,
681int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, 680int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
682 int proto) 681 int proto)
683{ 682{
683 struct net *net = dev_net(skb->dev);
684 struct sock *sk; 684 struct sock *sk;
685 struct udphdr *uh; 685 struct udphdr *uh;
686 struct net_device *dev = skb->dev;
687 struct in6_addr *saddr, *daddr; 686 struct in6_addr *saddr, *daddr;
688 u32 ulen = 0; 687 u32 ulen = 0;
689 struct net *net = dev_net(skb->dev);
690 688
691 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 689 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
692 goto short_packet; 690 goto short_packet;
@@ -745,7 +743,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
745 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, 743 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
746 proto == IPPROTO_UDPLITE); 744 proto == IPPROTO_UDPLITE);
747 745
748 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev); 746 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
749 747
750 kfree_skb(skb); 748 kfree_skb(skb);
751 return 0; 749 return 0;
@@ -1396,7 +1394,7 @@ static struct udp_seq_afinfo udp6_seq_afinfo = {
1396 }, 1394 },
1397}; 1395};
1398 1396
1399int udp6_proc_init(struct net *net) 1397int __net_init udp6_proc_init(struct net *net)
1400{ 1398{
1401 return udp_proc_register(net, &udp6_seq_afinfo); 1399 return udp_proc_register(net, &udp6_seq_afinfo);
1402} 1400}
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 6ea6938919e..5f48fadc27f 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -104,12 +104,12 @@ static struct udp_seq_afinfo udplite6_seq_afinfo = {
104 }, 104 },
105}; 105};
106 106
107static int udplite6_proc_init_net(struct net *net) 107static int __net_init udplite6_proc_init_net(struct net *net)
108{ 108{
109 return udp_proc_register(net, &udplite6_seq_afinfo); 109 return udp_proc_register(net, &udplite6_seq_afinfo);
110} 110}
111 111
112static void udplite6_proc_exit_net(struct net *net) 112static void __net_exit udplite6_proc_exit_net(struct net *net)
113{ 113{
114 udp_proc_unregister(net, &udplite6_seq_afinfo); 114 udp_proc_unregister(net, &udplite6_seq_afinfo);
115} 115}
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 9084582d236..2bc98ede123 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -101,7 +101,7 @@ int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
101 break; 101 break;
102 } 102 }
103 103
104 x = xfrm_state_lookup_byaddr(net, dst, src, proto, AF_INET6); 104 x = xfrm_state_lookup_byaddr(net, skb->mark, dst, src, proto, AF_INET6);
105 if (!x) 105 if (!x)
106 continue; 106 continue;
107 107
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index c4f4eef032a..0c92112dcba 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -38,7 +38,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
38 38
39 if (!skb->local_df && skb->len > mtu) { 39 if (!skb->local_df && skb->len > mtu) {
40 skb->dev = dst->dev; 40 skb->dev = dst->dev;
41 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 41 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
42 ret = -EMSGSIZE; 42 ret = -EMSGSIZE;
43 } 43 }
44 44
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 438831d3359..fa85a7d22dc 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -30,6 +30,25 @@
30#include <linux/ipv6.h> 30#include <linux/ipv6.h>
31#include <linux/icmpv6.h> 31#include <linux/icmpv6.h>
32#include <linux/mutex.h> 32#include <linux/mutex.h>
33#include <net/netns/generic.h>
34
35#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
36#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
37
38#define XFRM6_TUNNEL_SPI_MIN 1
39#define XFRM6_TUNNEL_SPI_MAX 0xffffffff
40
41struct xfrm6_tunnel_net {
42 struct hlist_head spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
43 struct hlist_head spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
44 u32 spi;
45};
46
47static int xfrm6_tunnel_net_id __read_mostly;
48static inline struct xfrm6_tunnel_net *xfrm6_tunnel_pernet(struct net *net)
49{
50 return net_generic(net, xfrm6_tunnel_net_id);
51}
33 52
34/* 53/*
35 * xfrm_tunnel_spi things are for allocating unique id ("spi") 54 * xfrm_tunnel_spi things are for allocating unique id ("spi")
@@ -46,19 +65,8 @@ struct xfrm6_tunnel_spi {
46 65
47static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock); 66static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock);
48 67
49static u32 xfrm6_tunnel_spi;
50
51#define XFRM6_TUNNEL_SPI_MIN 1
52#define XFRM6_TUNNEL_SPI_MAX 0xffffffff
53
54static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly; 68static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
55 69
56#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
57#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
58
59static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
60static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
61
62static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr) 70static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
63{ 71{
64 unsigned h; 72 unsigned h;
@@ -76,50 +84,14 @@ static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi)
76 return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE; 84 return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
77} 85}
78 86
79 87static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr)
80static int xfrm6_tunnel_spi_init(void)
81{
82 int i;
83
84 xfrm6_tunnel_spi = 0;
85 xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
86 sizeof(struct xfrm6_tunnel_spi),
87 0, SLAB_HWCACHE_ALIGN,
88 NULL);
89 if (!xfrm6_tunnel_spi_kmem)
90 return -ENOMEM;
91
92 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
93 INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]);
94 for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
95 INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byspi[i]);
96 return 0;
97}
98
99static void xfrm6_tunnel_spi_fini(void)
100{
101 int i;
102
103 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) {
104 if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i]))
105 return;
106 }
107 for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) {
108 if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i]))
109 return;
110 }
111 rcu_barrier();
112 kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
113 xfrm6_tunnel_spi_kmem = NULL;
114}
115
116static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
117{ 88{
89 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
118 struct xfrm6_tunnel_spi *x6spi; 90 struct xfrm6_tunnel_spi *x6spi;
119 struct hlist_node *pos; 91 struct hlist_node *pos;
120 92
121 hlist_for_each_entry_rcu(x6spi, pos, 93 hlist_for_each_entry_rcu(x6spi, pos,
122 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], 94 &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
123 list_byaddr) { 95 list_byaddr) {
124 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) 96 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0)
125 return x6spi; 97 return x6spi;
@@ -128,13 +100,13 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
128 return NULL; 100 return NULL;
129} 101}
130 102
131__be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) 103__be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr)
132{ 104{
133 struct xfrm6_tunnel_spi *x6spi; 105 struct xfrm6_tunnel_spi *x6spi;
134 u32 spi; 106 u32 spi;
135 107
136 rcu_read_lock_bh(); 108 rcu_read_lock_bh();
137 x6spi = __xfrm6_tunnel_spi_lookup(saddr); 109 x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
138 spi = x6spi ? x6spi->spi : 0; 110 spi = x6spi ? x6spi->spi : 0;
139 rcu_read_unlock_bh(); 111 rcu_read_unlock_bh();
140 return htonl(spi); 112 return htonl(spi);
@@ -142,14 +114,15 @@ __be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
142 114
143EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup); 115EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup);
144 116
145static int __xfrm6_tunnel_spi_check(u32 spi) 117static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi)
146{ 118{
119 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
147 struct xfrm6_tunnel_spi *x6spi; 120 struct xfrm6_tunnel_spi *x6spi;
148 int index = xfrm6_tunnel_spi_hash_byspi(spi); 121 int index = xfrm6_tunnel_spi_hash_byspi(spi);
149 struct hlist_node *pos; 122 struct hlist_node *pos;
150 123
151 hlist_for_each_entry(x6spi, pos, 124 hlist_for_each_entry(x6spi, pos,
152 &xfrm6_tunnel_spi_byspi[index], 125 &xfrm6_tn->spi_byspi[index],
153 list_byspi) { 126 list_byspi) {
154 if (x6spi->spi == spi) 127 if (x6spi->spi == spi)
155 return -1; 128 return -1;
@@ -157,61 +130,61 @@ static int __xfrm6_tunnel_spi_check(u32 spi)
157 return index; 130 return index;
158} 131}
159 132
160static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr) 133static u32 __xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
161{ 134{
135 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
162 u32 spi; 136 u32 spi;
163 struct xfrm6_tunnel_spi *x6spi; 137 struct xfrm6_tunnel_spi *x6spi;
164 int index; 138 int index;
165 139
166 if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN || 140 if (xfrm6_tn->spi < XFRM6_TUNNEL_SPI_MIN ||
167 xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX) 141 xfrm6_tn->spi >= XFRM6_TUNNEL_SPI_MAX)
168 xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN; 142 xfrm6_tn->spi = XFRM6_TUNNEL_SPI_MIN;
169 else 143 else
170 xfrm6_tunnel_spi++; 144 xfrm6_tn->spi++;
171 145
172 for (spi = xfrm6_tunnel_spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) { 146 for (spi = xfrm6_tn->spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
173 index = __xfrm6_tunnel_spi_check(spi); 147 index = __xfrm6_tunnel_spi_check(net, spi);
174 if (index >= 0) 148 if (index >= 0)
175 goto alloc_spi; 149 goto alloc_spi;
176 } 150 }
177 for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tunnel_spi; spi++) { 151 for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tn->spi; spi++) {
178 index = __xfrm6_tunnel_spi_check(spi); 152 index = __xfrm6_tunnel_spi_check(net, spi);
179 if (index >= 0) 153 if (index >= 0)
180 goto alloc_spi; 154 goto alloc_spi;
181 } 155 }
182 spi = 0; 156 spi = 0;
183 goto out; 157 goto out;
184alloc_spi: 158alloc_spi:
185 xfrm6_tunnel_spi = spi; 159 xfrm6_tn->spi = spi;
186 x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC); 160 x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
187 if (!x6spi) 161 if (!x6spi)
188 goto out; 162 goto out;
189 163
190 INIT_RCU_HEAD(&x6spi->rcu_head);
191 memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr)); 164 memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
192 x6spi->spi = spi; 165 x6spi->spi = spi;
193 atomic_set(&x6spi->refcnt, 1); 166 atomic_set(&x6spi->refcnt, 1);
194 167
195 hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tunnel_spi_byspi[index]); 168 hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tn->spi_byspi[index]);
196 169
197 index = xfrm6_tunnel_spi_hash_byaddr(saddr); 170 index = xfrm6_tunnel_spi_hash_byaddr(saddr);
198 hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]); 171 hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tn->spi_byaddr[index]);
199out: 172out:
200 return spi; 173 return spi;
201} 174}
202 175
203__be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr) 176__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
204{ 177{
205 struct xfrm6_tunnel_spi *x6spi; 178 struct xfrm6_tunnel_spi *x6spi;
206 u32 spi; 179 u32 spi;
207 180
208 spin_lock_bh(&xfrm6_tunnel_spi_lock); 181 spin_lock_bh(&xfrm6_tunnel_spi_lock);
209 x6spi = __xfrm6_tunnel_spi_lookup(saddr); 182 x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
210 if (x6spi) { 183 if (x6spi) {
211 atomic_inc(&x6spi->refcnt); 184 atomic_inc(&x6spi->refcnt);
212 spi = x6spi->spi; 185 spi = x6spi->spi;
213 } else 186 } else
214 spi = __xfrm6_tunnel_alloc_spi(saddr); 187 spi = __xfrm6_tunnel_alloc_spi(net, saddr);
215 spin_unlock_bh(&xfrm6_tunnel_spi_lock); 188 spin_unlock_bh(&xfrm6_tunnel_spi_lock);
216 189
217 return htonl(spi); 190 return htonl(spi);
@@ -225,15 +198,16 @@ static void x6spi_destroy_rcu(struct rcu_head *head)
225 container_of(head, struct xfrm6_tunnel_spi, rcu_head)); 198 container_of(head, struct xfrm6_tunnel_spi, rcu_head));
226} 199}
227 200
228void xfrm6_tunnel_free_spi(xfrm_address_t *saddr) 201void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr)
229{ 202{
203 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
230 struct xfrm6_tunnel_spi *x6spi; 204 struct xfrm6_tunnel_spi *x6spi;
231 struct hlist_node *pos, *n; 205 struct hlist_node *pos, *n;
232 206
233 spin_lock_bh(&xfrm6_tunnel_spi_lock); 207 spin_lock_bh(&xfrm6_tunnel_spi_lock);
234 208
235 hlist_for_each_entry_safe(x6spi, pos, n, 209 hlist_for_each_entry_safe(x6spi, pos, n,
236 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], 210 &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
237 list_byaddr) 211 list_byaddr)
238 { 212 {
239 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { 213 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
@@ -263,10 +237,11 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
263 237
264static int xfrm6_tunnel_rcv(struct sk_buff *skb) 238static int xfrm6_tunnel_rcv(struct sk_buff *skb)
265{ 239{
240 struct net *net = dev_net(skb->dev);
266 struct ipv6hdr *iph = ipv6_hdr(skb); 241 struct ipv6hdr *iph = ipv6_hdr(skb);
267 __be32 spi; 242 __be32 spi;
268 243
269 spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr); 244 spi = xfrm6_tunnel_spi_lookup(net, (xfrm_address_t *)&iph->saddr);
270 return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0; 245 return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0;
271} 246}
272 247
@@ -326,7 +301,9 @@ static int xfrm6_tunnel_init_state(struct xfrm_state *x)
326 301
327static void xfrm6_tunnel_destroy(struct xfrm_state *x) 302static void xfrm6_tunnel_destroy(struct xfrm_state *x)
328{ 303{
329 xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr); 304 struct net *net = xs_net(x);
305
306 xfrm6_tunnel_free_spi(net, (xfrm_address_t *)&x->props.saddr);
330} 307}
331 308
332static const struct xfrm_type xfrm6_tunnel_type = { 309static const struct xfrm_type xfrm6_tunnel_type = {
@@ -351,34 +328,73 @@ static struct xfrm6_tunnel xfrm46_tunnel_handler = {
351 .priority = 2, 328 .priority = 2,
352}; 329};
353 330
331static int __net_init xfrm6_tunnel_net_init(struct net *net)
332{
333 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
334 unsigned int i;
335
336 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
337 INIT_HLIST_HEAD(&xfrm6_tn->spi_byaddr[i]);
338 for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
339 INIT_HLIST_HEAD(&xfrm6_tn->spi_byspi[i]);
340 xfrm6_tn->spi = 0;
341
342 return 0;
343}
344
345static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
346{
347}
348
349static struct pernet_operations xfrm6_tunnel_net_ops = {
350 .init = xfrm6_tunnel_net_init,
351 .exit = xfrm6_tunnel_net_exit,
352 .id = &xfrm6_tunnel_net_id,
353 .size = sizeof(struct xfrm6_tunnel_net),
354};
355
354static int __init xfrm6_tunnel_init(void) 356static int __init xfrm6_tunnel_init(void)
355{ 357{
356 if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0) 358 int rv;
357 goto err; 359
358 if (xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6)) 360 xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
359 goto unreg; 361 sizeof(struct xfrm6_tunnel_spi),
360 if (xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET)) 362 0, SLAB_HWCACHE_ALIGN,
361 goto dereg6; 363 NULL);
362 if (xfrm6_tunnel_spi_init() < 0) 364 if (!xfrm6_tunnel_spi_kmem)
363 goto dereg46; 365 return -ENOMEM;
366 rv = register_pernet_subsys(&xfrm6_tunnel_net_ops);
367 if (rv < 0)
368 goto out_pernet;
369 rv = xfrm_register_type(&xfrm6_tunnel_type, AF_INET6);
370 if (rv < 0)
371 goto out_type;
372 rv = xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6);
373 if (rv < 0)
374 goto out_xfrm6;
375 rv = xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET);
376 if (rv < 0)
377 goto out_xfrm46;
364 return 0; 378 return 0;
365 379
366dereg46: 380out_xfrm46:
367 xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
368dereg6:
369 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6); 381 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
370unreg: 382out_xfrm6:
371 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); 383 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
372err: 384out_type:
373 return -EAGAIN; 385 unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
386out_pernet:
387 kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
388 return rv;
374} 389}
375 390
376static void __exit xfrm6_tunnel_fini(void) 391static void __exit xfrm6_tunnel_fini(void)
377{ 392{
378 xfrm6_tunnel_spi_fini();
379 xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET); 393 xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
380 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6); 394 xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
381 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); 395 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
396 unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
397 kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
382} 398}
383 399
384module_init(xfrm6_tunnel_init); 400module_init(xfrm6_tunnel_init);
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index 576178482f8..26b5bfcf1d0 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -13,45 +13,15 @@
13#include <net/tcp_states.h> 13#include <net/tcp_states.h>
14#include <net/ipx.h> 14#include <net/ipx.h>
15 15
16static __inline__ struct ipx_interface *ipx_get_interface_idx(loff_t pos)
17{
18 struct ipx_interface *i;
19
20 list_for_each_entry(i, &ipx_interfaces, node)
21 if (!pos--)
22 goto out;
23 i = NULL;
24out:
25 return i;
26}
27
28static struct ipx_interface *ipx_interfaces_next(struct ipx_interface *i)
29{
30 struct ipx_interface *rc = NULL;
31
32 if (i->node.next != &ipx_interfaces)
33 rc = list_entry(i->node.next, struct ipx_interface, node);
34 return rc;
35}
36
37static void *ipx_seq_interface_start(struct seq_file *seq, loff_t *pos) 16static void *ipx_seq_interface_start(struct seq_file *seq, loff_t *pos)
38{ 17{
39 loff_t l = *pos;
40
41 spin_lock_bh(&ipx_interfaces_lock); 18 spin_lock_bh(&ipx_interfaces_lock);
42 return l ? ipx_get_interface_idx(--l) : SEQ_START_TOKEN; 19 return seq_list_start_head(&ipx_interfaces, *pos);
43} 20}
44 21
45static void *ipx_seq_interface_next(struct seq_file *seq, void *v, loff_t *pos) 22static void *ipx_seq_interface_next(struct seq_file *seq, void *v, loff_t *pos)
46{ 23{
47 struct ipx_interface *i; 24 return seq_list_next(v, &ipx_interfaces, pos);
48
49 ++*pos;
50 if (v == SEQ_START_TOKEN)
51 i = ipx_interfaces_head();
52 else
53 i = ipx_interfaces_next(v);
54 return i;
55} 25}
56 26
57static void ipx_seq_interface_stop(struct seq_file *seq, void *v) 27static void ipx_seq_interface_stop(struct seq_file *seq, void *v)
@@ -63,7 +33,7 @@ static int ipx_seq_interface_show(struct seq_file *seq, void *v)
63{ 33{
64 struct ipx_interface *i; 34 struct ipx_interface *i;
65 35
66 if (v == SEQ_START_TOKEN) { 36 if (v == &ipx_interfaces) {
67 seq_puts(seq, "Network Node_Address Primary Device " 37 seq_puts(seq, "Network Node_Address Primary Device "
68 "Frame_Type"); 38 "Frame_Type");
69#ifdef IPX_REFCNT_DEBUG 39#ifdef IPX_REFCNT_DEBUG
@@ -73,7 +43,7 @@ static int ipx_seq_interface_show(struct seq_file *seq, void *v)
73 goto out; 43 goto out;
74 } 44 }
75 45
76 i = v; 46 i = list_entry(v, struct ipx_interface, node);
77 seq_printf(seq, "%08lX ", (unsigned long int)ntohl(i->if_netnum)); 47 seq_printf(seq, "%08lX ", (unsigned long int)ntohl(i->if_netnum));
78 seq_printf(seq, "%02X%02X%02X%02X%02X%02X ", 48 seq_printf(seq, "%02X%02X%02X%02X%02X%02X ",
79 i->if_node[0], i->if_node[1], i->if_node[2], 49 i->if_node[0], i->if_node[1], i->if_node[2],
@@ -89,53 +59,15 @@ out:
89 return 0; 59 return 0;
90} 60}
91 61
92static struct ipx_route *ipx_routes_head(void)
93{
94 struct ipx_route *rc = NULL;
95
96 if (!list_empty(&ipx_routes))
97 rc = list_entry(ipx_routes.next, struct ipx_route, node);
98 return rc;
99}
100
101static struct ipx_route *ipx_routes_next(struct ipx_route *r)
102{
103 struct ipx_route *rc = NULL;
104
105 if (r->node.next != &ipx_routes)
106 rc = list_entry(r->node.next, struct ipx_route, node);
107 return rc;
108}
109
110static __inline__ struct ipx_route *ipx_get_route_idx(loff_t pos)
111{
112 struct ipx_route *r;
113
114 list_for_each_entry(r, &ipx_routes, node)
115 if (!pos--)
116 goto out;
117 r = NULL;
118out:
119 return r;
120}
121
122static void *ipx_seq_route_start(struct seq_file *seq, loff_t *pos) 62static void *ipx_seq_route_start(struct seq_file *seq, loff_t *pos)
123{ 63{
124 loff_t l = *pos;
125 read_lock_bh(&ipx_routes_lock); 64 read_lock_bh(&ipx_routes_lock);
126 return l ? ipx_get_route_idx(--l) : SEQ_START_TOKEN; 65 return seq_list_start_head(&ipx_routes, *pos);
127} 66}
128 67
129static void *ipx_seq_route_next(struct seq_file *seq, void *v, loff_t *pos) 68static void *ipx_seq_route_next(struct seq_file *seq, void *v, loff_t *pos)
130{ 69{
131 struct ipx_route *r; 70 return seq_list_next(v, &ipx_routes, pos);
132
133 ++*pos;
134 if (v == SEQ_START_TOKEN)
135 r = ipx_routes_head();
136 else
137 r = ipx_routes_next(v);
138 return r;
139} 71}
140 72
141static void ipx_seq_route_stop(struct seq_file *seq, void *v) 73static void ipx_seq_route_stop(struct seq_file *seq, void *v)
@@ -147,11 +79,13 @@ static int ipx_seq_route_show(struct seq_file *seq, void *v)
147{ 79{
148 struct ipx_route *rt; 80 struct ipx_route *rt;
149 81
150 if (v == SEQ_START_TOKEN) { 82 if (v == &ipx_routes) {
151 seq_puts(seq, "Network Router_Net Router_Node\n"); 83 seq_puts(seq, "Network Router_Net Router_Node\n");
152 goto out; 84 goto out;
153 } 85 }
154 rt = v; 86
87 rt = list_entry(v, struct ipx_route, node);
88
155 seq_printf(seq, "%08lX ", (unsigned long int)ntohl(rt->ir_net)); 89 seq_printf(seq, "%08lX ", (unsigned long int)ntohl(rt->ir_net));
156 if (rt->ir_routed) 90 if (rt->ir_routed)
157 seq_printf(seq, "%08lX %02X%02X%02X%02X%02X%02X\n", 91 seq_printf(seq, "%08lX %02X%02X%02X%02X%02X%02X\n",
@@ -226,9 +160,9 @@ static void *ipx_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
226 spin_unlock_bh(&i->if_sklist_lock); 160 spin_unlock_bh(&i->if_sklist_lock);
227 sk = NULL; 161 sk = NULL;
228 for (;;) { 162 for (;;) {
229 i = ipx_interfaces_next(i); 163 if (i->node.next == &ipx_interfaces)
230 if (!i)
231 break; 164 break;
165 i = list_entry(i->node.next, struct ipx_interface, node);
232 spin_lock_bh(&i->if_sklist_lock); 166 spin_lock_bh(&i->if_sklist_lock);
233 if (!hlist_empty(&i->if_sklist)) { 167 if (!hlist_empty(&i->if_sklist)) {
234 sk = sk_head(&i->if_sklist); 168 sk = sk_head(&i->if_sklist);
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 811984d9324..8b85d774e47 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -496,9 +496,6 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
496 496
497 IRDA_DEBUG(0, "%s()\n", __func__ ); 497 IRDA_DEBUG(0, "%s()\n", __func__ );
498 498
499 if (!tty)
500 return;
501
502 IRDA_ASSERT(self != NULL, return;); 499 IRDA_ASSERT(self != NULL, return;);
503 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 500 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
504 501
@@ -1007,9 +1004,6 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
1007 IRDA_ASSERT(self != NULL, return;); 1004 IRDA_ASSERT(self != NULL, return;);
1008 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); 1005 IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
1009 1006
1010 if (!tty)
1011 return;
1012
1013 /* ircomm_tty_flush_buffer(tty); */ 1007 /* ircomm_tty_flush_buffer(tty); */
1014 ircomm_tty_shutdown(self); 1008 ircomm_tty_shutdown(self);
1015 1009
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 315ead3cb92..e486dc89ea5 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -1128,34 +1128,14 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
1128 */ 1128 */
1129static void *irlan_seq_start(struct seq_file *seq, loff_t *pos) 1129static void *irlan_seq_start(struct seq_file *seq, loff_t *pos)
1130{ 1130{
1131 int i = 1;
1132 struct irlan_cb *self;
1133
1134 rcu_read_lock(); 1131 rcu_read_lock();
1135 if (*pos == 0) 1132 return seq_list_start_head(&irlans, *pos);
1136 return SEQ_START_TOKEN;
1137
1138 list_for_each_entry(self, &irlans, dev_list) {
1139 if (*pos == i)
1140 return self;
1141 ++i;
1142 }
1143 return NULL;
1144} 1133}
1145 1134
1146/* Return entry after v, and increment pos */ 1135/* Return entry after v, and increment pos */
1147static void *irlan_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1136static void *irlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1148{ 1137{
1149 struct list_head *nxt; 1138 return seq_list_next(v, &irlans, pos);
1150
1151 ++*pos;
1152 if (v == SEQ_START_TOKEN)
1153 nxt = irlans.next;
1154 else
1155 nxt = ((struct irlan_cb *)v)->dev_list.next;
1156
1157 return (nxt == &irlans) ? NULL
1158 : list_entry(nxt, struct irlan_cb, dev_list);
1159} 1139}
1160 1140
1161/* End of reading /proc file */ 1141/* End of reading /proc file */
@@ -1170,10 +1150,10 @@ static void irlan_seq_stop(struct seq_file *seq, void *v)
1170 */ 1150 */
1171static int irlan_seq_show(struct seq_file *seq, void *v) 1151static int irlan_seq_show(struct seq_file *seq, void *v)
1172{ 1152{
1173 if (v == SEQ_START_TOKEN) 1153 if (v == &irlans)
1174 seq_puts(seq, "IrLAN instances:\n"); 1154 seq_puts(seq, "IrLAN instances:\n");
1175 else { 1155 else {
1176 struct irlan_cb *self = v; 1156 struct irlan_cb *self = list_entry(v, struct irlan_cb, dev_list);
1177 1157
1178 IRDA_ASSERT(self != NULL, return -1;); 1158 IRDA_ASSERT(self != NULL, return -1;);
1179 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); 1159 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index d340110f5c0..9616c32d107 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -321,14 +321,15 @@ static void irlan_eth_set_multicast_list(struct net_device *dev)
321 /* Enable promiscuous mode */ 321 /* Enable promiscuous mode */
322 IRDA_WARNING("Promiscuous mode not implemented by IrLAN!\n"); 322 IRDA_WARNING("Promiscuous mode not implemented by IrLAN!\n");
323 } 323 }
324 else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS) { 324 else if ((dev->flags & IFF_ALLMULTI) ||
325 netdev_mc_count(dev) > HW_MAX_ADDRS) {
325 /* Disable promiscuous mode, use normal mode. */ 326 /* Disable promiscuous mode, use normal mode. */
326 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ ); 327 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
327 /* hardware_set_filter(NULL); */ 328 /* hardware_set_filter(NULL); */
328 329
329 irlan_set_multicast_filter(self, TRUE); 330 irlan_set_multicast_filter(self, TRUE);
330 } 331 }
331 else if (dev->mc_count) { 332 else if (!netdev_mc_empty(dev)) {
332 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ ); 333 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
333 /* Walk the address list, and load the filter */ 334 /* Walk the address list, and load the filter */
334 /* hardware_set_filter(dev->mc_list); */ 335 /* hardware_set_filter(dev->mc_list); */
diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c
index 476b307bd80..69b5b75f543 100644
--- a/net/irda/irnetlink.c
+++ b/net/irda/irnetlink.c
@@ -124,7 +124,7 @@ static int irda_nl_get_mode(struct sk_buff *skb, struct genl_info *info)
124 return ret; 124 return ret;
125} 125}
126 126
127static struct nla_policy irda_nl_policy[IRDA_NL_ATTR_MAX + 1] = { 127static const struct nla_policy irda_nl_policy[IRDA_NL_ATTR_MAX + 1] = {
128 [IRDA_NL_ATTR_IFNAME] = { .type = NLA_NUL_STRING, 128 [IRDA_NL_ATTR_IFNAME] = { .type = NLA_NUL_STRING,
129 .len = IFNAMSIZ-1 }, 129 .len = IFNAMSIZ-1 },
130 [IRDA_NL_ATTR_MODE] = { .type = NLA_U32 }, 130 [IRDA_NL_ATTR_MODE] = { .type = NLA_U32 },
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 539f43bc97d..36870788264 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -41,10 +41,10 @@ struct netns_pfkey {
41 struct hlist_head table; 41 struct hlist_head table;
42 atomic_t socks_nr; 42 atomic_t socks_nr;
43}; 43};
44static DECLARE_WAIT_QUEUE_HEAD(pfkey_table_wait); 44static DEFINE_MUTEX(pfkey_mutex);
45static DEFINE_RWLOCK(pfkey_table_lock);
46static atomic_t pfkey_table_users = ATOMIC_INIT(0);
47 45
46#define DUMMY_MARK 0
47static struct xfrm_mark dummy_mark = {0, 0};
48struct pfkey_sock { 48struct pfkey_sock {
49 /* struct sock must be the first member of struct pfkey_sock */ 49 /* struct sock must be the first member of struct pfkey_sock */
50 struct sock sk; 50 struct sock sk;
@@ -108,50 +108,6 @@ static void pfkey_sock_destruct(struct sock *sk)
108 atomic_dec(&net_pfkey->socks_nr); 108 atomic_dec(&net_pfkey->socks_nr);
109} 109}
110 110
111static void pfkey_table_grab(void)
112{
113 write_lock_bh(&pfkey_table_lock);
114
115 if (atomic_read(&pfkey_table_users)) {
116 DECLARE_WAITQUEUE(wait, current);
117
118 add_wait_queue_exclusive(&pfkey_table_wait, &wait);
119 for(;;) {
120 set_current_state(TASK_UNINTERRUPTIBLE);
121 if (atomic_read(&pfkey_table_users) == 0)
122 break;
123 write_unlock_bh(&pfkey_table_lock);
124 schedule();
125 write_lock_bh(&pfkey_table_lock);
126 }
127
128 __set_current_state(TASK_RUNNING);
129 remove_wait_queue(&pfkey_table_wait, &wait);
130 }
131}
132
133static __inline__ void pfkey_table_ungrab(void)
134{
135 write_unlock_bh(&pfkey_table_lock);
136 wake_up(&pfkey_table_wait);
137}
138
139static __inline__ void pfkey_lock_table(void)
140{
141 /* read_lock() synchronizes us to pfkey_table_grab */
142
143 read_lock(&pfkey_table_lock);
144 atomic_inc(&pfkey_table_users);
145 read_unlock(&pfkey_table_lock);
146}
147
148static __inline__ void pfkey_unlock_table(void)
149{
150 if (atomic_dec_and_test(&pfkey_table_users))
151 wake_up(&pfkey_table_wait);
152}
153
154
155static const struct proto_ops pfkey_ops; 111static const struct proto_ops pfkey_ops;
156 112
157static void pfkey_insert(struct sock *sk) 113static void pfkey_insert(struct sock *sk)
@@ -159,16 +115,16 @@ static void pfkey_insert(struct sock *sk)
159 struct net *net = sock_net(sk); 115 struct net *net = sock_net(sk);
160 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); 116 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
161 117
162 pfkey_table_grab(); 118 mutex_lock(&pfkey_mutex);
163 sk_add_node(sk, &net_pfkey->table); 119 sk_add_node_rcu(sk, &net_pfkey->table);
164 pfkey_table_ungrab(); 120 mutex_unlock(&pfkey_mutex);
165} 121}
166 122
167static void pfkey_remove(struct sock *sk) 123static void pfkey_remove(struct sock *sk)
168{ 124{
169 pfkey_table_grab(); 125 mutex_lock(&pfkey_mutex);
170 sk_del_node_init(sk); 126 sk_del_node_init_rcu(sk);
171 pfkey_table_ungrab(); 127 mutex_unlock(&pfkey_mutex);
172} 128}
173 129
174static struct proto key_proto = { 130static struct proto key_proto = {
@@ -223,6 +179,8 @@ static int pfkey_release(struct socket *sock)
223 sock_orphan(sk); 179 sock_orphan(sk);
224 sock->sk = NULL; 180 sock->sk = NULL;
225 skb_queue_purge(&sk->sk_write_queue); 181 skb_queue_purge(&sk->sk_write_queue);
182
183 synchronize_rcu();
226 sock_put(sk); 184 sock_put(sk);
227 185
228 return 0; 186 return 0;
@@ -277,8 +235,8 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
277 if (!skb) 235 if (!skb)
278 return -ENOMEM; 236 return -ENOMEM;
279 237
280 pfkey_lock_table(); 238 rcu_read_lock();
281 sk_for_each(sk, node, &net_pfkey->table) { 239 sk_for_each_rcu(sk, node, &net_pfkey->table) {
282 struct pfkey_sock *pfk = pfkey_sk(sk); 240 struct pfkey_sock *pfk = pfkey_sk(sk);
283 int err2; 241 int err2;
284 242
@@ -309,7 +267,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
309 if ((broadcast_flags & BROADCAST_REGISTERED) && err) 267 if ((broadcast_flags & BROADCAST_REGISTERED) && err)
310 err = err2; 268 err = err2;
311 } 269 }
312 pfkey_unlock_table(); 270 rcu_read_unlock();
313 271
314 if (one_sk != NULL) 272 if (one_sk != NULL)
315 err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk); 273 err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
@@ -691,7 +649,7 @@ static struct xfrm_state *pfkey_xfrm_state_lookup(struct net *net, struct sadb_
691 if (!xaddr) 649 if (!xaddr)
692 return NULL; 650 return NULL;
693 651
694 return xfrm_state_lookup(net, xaddr, sa->sadb_sa_spi, proto, family); 652 return xfrm_state_lookup(net, DUMMY_MARK, xaddr, sa->sadb_sa_spi, proto, family);
695} 653}
696 654
697#define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1))) 655#define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1)))
@@ -1360,7 +1318,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
1360 } 1318 }
1361 1319
1362 if (hdr->sadb_msg_seq) { 1320 if (hdr->sadb_msg_seq) {
1363 x = xfrm_find_acq_byseq(net, hdr->sadb_msg_seq); 1321 x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq);
1364 if (x && xfrm_addr_cmp(&x->id.daddr, xdaddr, family)) { 1322 if (x && xfrm_addr_cmp(&x->id.daddr, xdaddr, family)) {
1365 xfrm_state_put(x); 1323 xfrm_state_put(x);
1366 x = NULL; 1324 x = NULL;
@@ -1368,7 +1326,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
1368 } 1326 }
1369 1327
1370 if (!x) 1328 if (!x)
1371 x = xfrm_find_acq(net, mode, reqid, proto, xdaddr, xsaddr, 1, family); 1329 x = xfrm_find_acq(net, &dummy_mark, mode, reqid, proto, xdaddr, xsaddr, 1, family);
1372 1330
1373 if (x == NULL) 1331 if (x == NULL)
1374 return -ENOENT; 1332 return -ENOENT;
@@ -1417,7 +1375,7 @@ static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, struct sadb_msg *
1417 if (hdr->sadb_msg_seq == 0 || hdr->sadb_msg_errno == 0) 1375 if (hdr->sadb_msg_seq == 0 || hdr->sadb_msg_errno == 0)
1418 return 0; 1376 return 0;
1419 1377
1420 x = xfrm_find_acq_byseq(net, hdr->sadb_msg_seq); 1378 x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq);
1421 if (x == NULL) 1379 if (x == NULL)
1422 return 0; 1380 return 0;
1423 1381
@@ -1712,6 +1670,23 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, struct sadb_msg
1712 return 0; 1670 return 0;
1713} 1671}
1714 1672
1673static int unicast_flush_resp(struct sock *sk, struct sadb_msg *ihdr)
1674{
1675 struct sk_buff *skb;
1676 struct sadb_msg *hdr;
1677
1678 skb = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
1679 if (!skb)
1680 return -ENOBUFS;
1681
1682 hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
1683 memcpy(hdr, ihdr, sizeof(struct sadb_msg));
1684 hdr->sadb_msg_errno = (uint8_t) 0;
1685 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
1686
1687 return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
1688}
1689
1715static int key_notify_sa_flush(struct km_event *c) 1690static int key_notify_sa_flush(struct km_event *c)
1716{ 1691{
1717 struct sk_buff *skb; 1692 struct sk_buff *skb;
@@ -1740,7 +1715,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hd
1740 unsigned proto; 1715 unsigned proto;
1741 struct km_event c; 1716 struct km_event c;
1742 struct xfrm_audit audit_info; 1717 struct xfrm_audit audit_info;
1743 int err; 1718 int err, err2;
1744 1719
1745 proto = pfkey_satype2proto(hdr->sadb_msg_satype); 1720 proto = pfkey_satype2proto(hdr->sadb_msg_satype);
1746 if (proto == 0) 1721 if (proto == 0)
@@ -1750,8 +1725,13 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hd
1750 audit_info.sessionid = audit_get_sessionid(current); 1725 audit_info.sessionid = audit_get_sessionid(current);
1751 audit_info.secid = 0; 1726 audit_info.secid = 0;
1752 err = xfrm_state_flush(net, proto, &audit_info); 1727 err = xfrm_state_flush(net, proto, &audit_info);
1753 if (err) 1728 err2 = unicast_flush_resp(sk, hdr);
1754 return err; 1729 if (err || err2) {
1730 if (err == -ESRCH) /* empty table - go quietly */
1731 err = 0;
1732 return err ? err : err2;
1733 }
1734
1755 c.data.proto = proto; 1735 c.data.proto = proto;
1756 c.seq = hdr->sadb_msg_seq; 1736 c.seq = hdr->sadb_msg_seq;
1757 c.pid = hdr->sadb_msg_pid; 1737 c.pid = hdr->sadb_msg_pid;
@@ -2346,7 +2326,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg
2346 return err; 2326 return err;
2347 } 2327 }
2348 2328
2349 xp = xfrm_policy_bysel_ctx(net, XFRM_POLICY_TYPE_MAIN, 2329 xp = xfrm_policy_bysel_ctx(net, DUMMY_MARK, XFRM_POLICY_TYPE_MAIN,
2350 pol->sadb_x_policy_dir - 1, &sel, pol_ctx, 2330 pol->sadb_x_policy_dir - 1, &sel, pol_ctx,
2351 1, &err); 2331 1, &err);
2352 security_xfrm_policy_free(pol_ctx); 2332 security_xfrm_policy_free(pol_ctx);
@@ -2594,8 +2574,8 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
2594 return -EINVAL; 2574 return -EINVAL;
2595 2575
2596 delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2); 2576 delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2);
2597 xp = xfrm_policy_byid(net, XFRM_POLICY_TYPE_MAIN, dir, 2577 xp = xfrm_policy_byid(net, DUMMY_MARK, XFRM_POLICY_TYPE_MAIN,
2598 pol->sadb_x_policy_id, delete, &err); 2578 dir, pol->sadb_x_policy_id, delete, &err);
2599 if (xp == NULL) 2579 if (xp == NULL)
2600 return -ENOENT; 2580 return -ENOENT;
2601 2581
@@ -2706,14 +2686,19 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, struct sadb_msg
2706 struct net *net = sock_net(sk); 2686 struct net *net = sock_net(sk);
2707 struct km_event c; 2687 struct km_event c;
2708 struct xfrm_audit audit_info; 2688 struct xfrm_audit audit_info;
2709 int err; 2689 int err, err2;
2710 2690
2711 audit_info.loginuid = audit_get_loginuid(current); 2691 audit_info.loginuid = audit_get_loginuid(current);
2712 audit_info.sessionid = audit_get_sessionid(current); 2692 audit_info.sessionid = audit_get_sessionid(current);
2713 audit_info.secid = 0; 2693 audit_info.secid = 0;
2714 err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info); 2694 err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
2715 if (err) 2695 err2 = unicast_flush_resp(sk, hdr);
2696 if (err || err2) {
2697 if (err == -ESRCH) /* empty table - old silent behavior */
2698 return 0;
2716 return err; 2699 return err;
2700 }
2701
2717 c.data.type = XFRM_POLICY_TYPE_MAIN; 2702 c.data.type = XFRM_POLICY_TYPE_MAIN;
2718 c.event = XFRM_MSG_FLUSHPOLICY; 2703 c.event = XFRM_MSG_FLUSHPOLICY;
2719 c.pid = hdr->sadb_msg_pid; 2704 c.pid = hdr->sadb_msg_pid;
@@ -3019,12 +3004,11 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_e
3019static u32 get_acqseq(void) 3004static u32 get_acqseq(void)
3020{ 3005{
3021 u32 res; 3006 u32 res;
3022 static u32 acqseq; 3007 static atomic_t acqseq;
3023 static DEFINE_SPINLOCK(acqseq_lock);
3024 3008
3025 spin_lock_bh(&acqseq_lock); 3009 do {
3026 res = (++acqseq ? : ++acqseq); 3010 res = atomic_inc_return(&acqseq);
3027 spin_unlock_bh(&acqseq_lock); 3011 } while (!res);
3028 return res; 3012 return res;
3029} 3013}
3030 3014
@@ -3655,9 +3639,8 @@ static const struct net_proto_family pfkey_family_ops = {
3655#ifdef CONFIG_PROC_FS 3639#ifdef CONFIG_PROC_FS
3656static int pfkey_seq_show(struct seq_file *f, void *v) 3640static int pfkey_seq_show(struct seq_file *f, void *v)
3657{ 3641{
3658 struct sock *s; 3642 struct sock *s = sk_entry(v);
3659 3643
3660 s = (struct sock *)v;
3661 if (v == SEQ_START_TOKEN) 3644 if (v == SEQ_START_TOKEN)
3662 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n"); 3645 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
3663 else 3646 else
@@ -3676,19 +3659,9 @@ static void *pfkey_seq_start(struct seq_file *f, loff_t *ppos)
3676{ 3659{
3677 struct net *net = seq_file_net(f); 3660 struct net *net = seq_file_net(f);
3678 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); 3661 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
3679 struct sock *s;
3680 struct hlist_node *node;
3681 loff_t pos = *ppos;
3682
3683 read_lock(&pfkey_table_lock);
3684 if (pos == 0)
3685 return SEQ_START_TOKEN;
3686 3662
3687 sk_for_each(s, node, &net_pfkey->table) 3663 rcu_read_lock();
3688 if (pos-- == 1) 3664 return seq_hlist_start_head_rcu(&net_pfkey->table, *ppos);
3689 return s;
3690
3691 return NULL;
3692} 3665}
3693 3666
3694static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos) 3667static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos)
@@ -3696,15 +3669,12 @@ static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos)
3696 struct net *net = seq_file_net(f); 3669 struct net *net = seq_file_net(f);
3697 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); 3670 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
3698 3671
3699 ++*ppos; 3672 return seq_hlist_next_rcu(v, &net_pfkey->table, ppos);
3700 return (v == SEQ_START_TOKEN) ?
3701 sk_head(&net_pfkey->table) :
3702 sk_next((struct sock *)v);
3703} 3673}
3704 3674
3705static void pfkey_seq_stop(struct seq_file *f, void *v) 3675static void pfkey_seq_stop(struct seq_file *f, void *v)
3706{ 3676{
3707 read_unlock(&pfkey_table_lock); 3677 rcu_read_unlock();
3708} 3678}
3709 3679
3710static const struct seq_operations pfkey_seq_ops = { 3680static const struct seq_operations pfkey_seq_ops = {
@@ -3738,17 +3708,17 @@ static int __net_init pfkey_init_proc(struct net *net)
3738 return 0; 3708 return 0;
3739} 3709}
3740 3710
3741static void pfkey_exit_proc(struct net *net) 3711static void __net_exit pfkey_exit_proc(struct net *net)
3742{ 3712{
3743 proc_net_remove(net, "pfkey"); 3713 proc_net_remove(net, "pfkey");
3744} 3714}
3745#else 3715#else
3746static int __net_init pfkey_init_proc(struct net *net) 3716static inline int pfkey_init_proc(struct net *net)
3747{ 3717{
3748 return 0; 3718 return 0;
3749} 3719}
3750 3720
3751static void pfkey_exit_proc(struct net *net) 3721static inline void pfkey_exit_proc(struct net *net)
3752{ 3722{
3753} 3723}
3754#endif 3724#endif
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 3a66546cad0..e35d907fba2 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -47,6 +47,10 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout);
47#define dprintk(args...) 47#define dprintk(args...)
48#endif 48#endif
49 49
50/* Maybe we'll add some more in the future. */
51#define LLC_CMSG_PKTINFO 1
52
53
50/** 54/**
51 * llc_ui_next_link_no - return the next unused link number for a sap 55 * llc_ui_next_link_no - return the next unused link number for a sap
52 * @sap: Address of sap to get link number from. 56 * @sap: Address of sap to get link number from.
@@ -136,6 +140,7 @@ static struct proto llc_proto = {
136 .name = "LLC", 140 .name = "LLC",
137 .owner = THIS_MODULE, 141 .owner = THIS_MODULE,
138 .obj_size = sizeof(struct llc_sock), 142 .obj_size = sizeof(struct llc_sock),
143 .slab_flags = SLAB_DESTROY_BY_RCU,
139}; 144};
140 145
141/** 146/**
@@ -192,10 +197,8 @@ static int llc_ui_release(struct socket *sock)
192 llc->laddr.lsap, llc->daddr.lsap); 197 llc->laddr.lsap, llc->daddr.lsap);
193 if (!llc_send_disc(sk)) 198 if (!llc_send_disc(sk))
194 llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); 199 llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
195 if (!sock_flag(sk, SOCK_ZAPPED)) { 200 if (!sock_flag(sk, SOCK_ZAPPED))
196 llc_sap_put(llc->sap);
197 llc_sap_remove_socket(llc->sap, sk); 201 llc_sap_remove_socket(llc->sap, sk);
198 }
199 release_sock(sk); 202 release_sock(sk);
200 if (llc->dev) 203 if (llc->dev)
201 dev_put(llc->dev); 204 dev_put(llc->dev);
@@ -255,7 +258,14 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
255 if (!sock_flag(sk, SOCK_ZAPPED)) 258 if (!sock_flag(sk, SOCK_ZAPPED))
256 goto out; 259 goto out;
257 rc = -ENODEV; 260 rc = -ENODEV;
258 llc->dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd); 261 if (sk->sk_bound_dev_if) {
262 llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
263 if (llc->dev && addr->sllc_arphrd != llc->dev->type) {
264 dev_put(llc->dev);
265 llc->dev = NULL;
266 }
267 } else
268 llc->dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd);
259 if (!llc->dev) 269 if (!llc->dev)
260 goto out; 270 goto out;
261 rc = -EUSERS; 271 rc = -EUSERS;
@@ -306,7 +316,25 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
306 goto out; 316 goto out;
307 rc = -ENODEV; 317 rc = -ENODEV;
308 rtnl_lock(); 318 rtnl_lock();
309 llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd, addr->sllc_mac); 319 if (sk->sk_bound_dev_if) {
320 llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
321 if (llc->dev) {
322 if (!addr->sllc_arphrd)
323 addr->sllc_arphrd = llc->dev->type;
324 if (llc_mac_null(addr->sllc_mac))
325 memcpy(addr->sllc_mac, llc->dev->dev_addr,
326 IFHWADDRLEN);
327 if (addr->sllc_arphrd != llc->dev->type ||
328 !llc_mac_match(addr->sllc_mac,
329 llc->dev->dev_addr)) {
330 rc = -EINVAL;
331 dev_put(llc->dev);
332 llc->dev = NULL;
333 }
334 }
335 } else
336 llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd,
337 addr->sllc_mac);
310 rtnl_unlock(); 338 rtnl_unlock();
311 if (!llc->dev) 339 if (!llc->dev)
312 goto out; 340 goto out;
@@ -322,7 +350,6 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
322 rc = -EBUSY; /* some other network layer is using the sap */ 350 rc = -EBUSY; /* some other network layer is using the sap */
323 if (!sap) 351 if (!sap)
324 goto out; 352 goto out;
325 llc_sap_hold(sap);
326 } else { 353 } else {
327 struct llc_addr laddr, daddr; 354 struct llc_addr laddr, daddr;
328 struct sock *ask; 355 struct sock *ask;
@@ -591,6 +618,20 @@ static int llc_wait_data(struct sock *sk, long timeo)
591 return rc; 618 return rc;
592} 619}
593 620
621static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
622{
623 struct llc_sock *llc = llc_sk(skb->sk);
624
625 if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
626 struct llc_pktinfo info;
627
628 info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
629 llc_pdu_decode_dsap(skb, &info.lpi_sap);
630 llc_pdu_decode_da(skb, info.lpi_mac);
631 put_cmsg(msg, SOL_LLC, LLC_OPT_PKTINFO, sizeof(info), &info);
632 }
633}
634
594/** 635/**
595 * llc_ui_accept - accept a new incoming connection. 636 * llc_ui_accept - accept a new incoming connection.
596 * @sock: Socket which connections arrive on. 637 * @sock: Socket which connections arrive on.
@@ -812,6 +853,8 @@ copy_uaddr:
812 memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr)); 853 memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
813 msg->msg_namelen = sizeof(*uaddr); 854 msg->msg_namelen = sizeof(*uaddr);
814 } 855 }
856 if (llc_sk(sk)->cmsg_flags)
857 llc_cmsg_rcv(msg, skb);
815 goto out; 858 goto out;
816} 859}
817 860
@@ -1030,6 +1073,12 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
1030 goto out; 1073 goto out;
1031 llc->rw = opt; 1074 llc->rw = opt;
1032 break; 1075 break;
1076 case LLC_OPT_PKTINFO:
1077 if (opt)
1078 llc->cmsg_flags |= LLC_CMSG_PKTINFO;
1079 else
1080 llc->cmsg_flags &= ~LLC_CMSG_PKTINFO;
1081 break;
1033 default: 1082 default:
1034 rc = -ENOPROTOOPT; 1083 rc = -ENOPROTOOPT;
1035 goto out; 1084 goto out;
@@ -1083,6 +1132,9 @@ static int llc_ui_getsockopt(struct socket *sock, int level, int optname,
1083 val = llc->k; break; 1132 val = llc->k; break;
1084 case LLC_OPT_RX_WIN: 1133 case LLC_OPT_RX_WIN:
1085 val = llc->rw; break; 1134 val = llc->rw; break;
1135 case LLC_OPT_PKTINFO:
1136 val = (llc->cmsg_flags & LLC_CMSG_PKTINFO) != 0;
1137 break;
1086 default: 1138 default:
1087 rc = -ENOPROTOOPT; 1139 rc = -ENOPROTOOPT;
1088 goto out; 1140 goto out;
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index c6bab39b018..a8dde9b010d 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -468,6 +468,19 @@ static int llc_exec_conn_trans_actions(struct sock *sk,
468 return rc; 468 return rc;
469} 469}
470 470
471static inline bool llc_estab_match(const struct llc_sap *sap,
472 const struct llc_addr *daddr,
473 const struct llc_addr *laddr,
474 const struct sock *sk)
475{
476 struct llc_sock *llc = llc_sk(sk);
477
478 return llc->laddr.lsap == laddr->lsap &&
479 llc->daddr.lsap == daddr->lsap &&
480 llc_mac_match(llc->laddr.mac, laddr->mac) &&
481 llc_mac_match(llc->daddr.mac, daddr->mac);
482}
483
471/** 484/**
472 * __llc_lookup_established - Finds connection for the remote/local sap/mac 485 * __llc_lookup_established - Finds connection for the remote/local sap/mac
473 * @sap: SAP 486 * @sap: SAP
@@ -484,23 +497,35 @@ static struct sock *__llc_lookup_established(struct llc_sap *sap,
484 struct llc_addr *laddr) 497 struct llc_addr *laddr)
485{ 498{
486 struct sock *rc; 499 struct sock *rc;
487 struct hlist_node *node; 500 struct hlist_nulls_node *node;
488 501 int slot = llc_sk_laddr_hashfn(sap, laddr);
489 read_lock(&sap->sk_list.lock); 502 struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
490 sk_for_each(rc, node, &sap->sk_list.list) { 503
491 struct llc_sock *llc = llc_sk(rc); 504 rcu_read_lock();
492 505again:
493 if (llc->laddr.lsap == laddr->lsap && 506 sk_nulls_for_each_rcu(rc, node, laddr_hb) {
494 llc->daddr.lsap == daddr->lsap && 507 if (llc_estab_match(sap, daddr, laddr, rc)) {
495 llc_mac_match(llc->laddr.mac, laddr->mac) && 508 /* Extra checks required by SLAB_DESTROY_BY_RCU */
496 llc_mac_match(llc->daddr.mac, daddr->mac)) { 509 if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
497 sock_hold(rc); 510 goto again;
511 if (unlikely(llc_sk(rc)->sap != sap ||
512 !llc_estab_match(sap, daddr, laddr, rc))) {
513 sock_put(rc);
514 continue;
515 }
498 goto found; 516 goto found;
499 } 517 }
500 } 518 }
501 rc = NULL; 519 rc = NULL;
520 /*
521 * if the nulls value we got at the end of this lookup is
522 * not the expected one, we must restart lookup.
523 * We probably met an item that was moved to another chain.
524 */
525 if (unlikely(get_nulls_value(node) != slot))
526 goto again;
502found: 527found:
503 read_unlock(&sap->sk_list.lock); 528 rcu_read_unlock();
504 return rc; 529 return rc;
505} 530}
506 531
@@ -516,6 +541,53 @@ struct sock *llc_lookup_established(struct llc_sap *sap,
516 return sk; 541 return sk;
517} 542}
518 543
544static inline bool llc_listener_match(const struct llc_sap *sap,
545 const struct llc_addr *laddr,
546 const struct sock *sk)
547{
548 struct llc_sock *llc = llc_sk(sk);
549
550 return sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN &&
551 llc->laddr.lsap == laddr->lsap &&
552 llc_mac_match(llc->laddr.mac, laddr->mac);
553}
554
555static struct sock *__llc_lookup_listener(struct llc_sap *sap,
556 struct llc_addr *laddr)
557{
558 struct sock *rc;
559 struct hlist_nulls_node *node;
560 int slot = llc_sk_laddr_hashfn(sap, laddr);
561 struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
562
563 rcu_read_lock();
564again:
565 sk_nulls_for_each_rcu(rc, node, laddr_hb) {
566 if (llc_listener_match(sap, laddr, rc)) {
567 /* Extra checks required by SLAB_DESTROY_BY_RCU */
568 if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
569 goto again;
570 if (unlikely(llc_sk(rc)->sap != sap ||
571 !llc_listener_match(sap, laddr, rc))) {
572 sock_put(rc);
573 continue;
574 }
575 goto found;
576 }
577 }
578 rc = NULL;
579 /*
580 * if the nulls value we got at the end of this lookup is
581 * not the expected one, we must restart lookup.
582 * We probably met an item that was moved to another chain.
583 */
584 if (unlikely(get_nulls_value(node) != slot))
585 goto again;
586found:
587 rcu_read_unlock();
588 return rc;
589}
590
519/** 591/**
520 * llc_lookup_listener - Finds listener for local MAC + SAP 592 * llc_lookup_listener - Finds listener for local MAC + SAP
521 * @sap: SAP 593 * @sap: SAP
@@ -529,24 +601,12 @@ struct sock *llc_lookup_established(struct llc_sap *sap,
529static struct sock *llc_lookup_listener(struct llc_sap *sap, 601static struct sock *llc_lookup_listener(struct llc_sap *sap,
530 struct llc_addr *laddr) 602 struct llc_addr *laddr)
531{ 603{
532 struct sock *rc; 604 static struct llc_addr null_addr;
533 struct hlist_node *node; 605 struct sock *rc = __llc_lookup_listener(sap, laddr);
534 606
535 read_lock(&sap->sk_list.lock); 607 if (!rc)
536 sk_for_each(rc, node, &sap->sk_list.list) { 608 rc = __llc_lookup_listener(sap, &null_addr);
537 struct llc_sock *llc = llc_sk(rc);
538 609
539 if (rc->sk_type == SOCK_STREAM && rc->sk_state == TCP_LISTEN &&
540 llc->laddr.lsap == laddr->lsap &&
541 (llc_mac_match(llc->laddr.mac, laddr->mac) ||
542 llc_mac_null(llc->laddr.mac))) {
543 sock_hold(rc);
544 goto found;
545 }
546 }
547 rc = NULL;
548found:
549 read_unlock(&sap->sk_list.lock);
550 return rc; 610 return rc;
551} 611}
552 612
@@ -647,15 +707,22 @@ static int llc_find_offset(int state, int ev_type)
647 * @sap: SAP 707 * @sap: SAP
648 * @sk: socket 708 * @sk: socket
649 * 709 *
650 * This function adds a socket to sk_list of a SAP. 710 * This function adds a socket to the hash tables of a SAP.
651 */ 711 */
652void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk) 712void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
653{ 713{
714 struct llc_sock *llc = llc_sk(sk);
715 struct hlist_head *dev_hb = llc_sk_dev_hash(sap, llc->dev->ifindex);
716 struct hlist_nulls_head *laddr_hb = llc_sk_laddr_hash(sap, &llc->laddr);
717
654 llc_sap_hold(sap); 718 llc_sap_hold(sap);
655 write_lock_bh(&sap->sk_list.lock);
656 llc_sk(sk)->sap = sap; 719 llc_sk(sk)->sap = sap;
657 sk_add_node(sk, &sap->sk_list.list); 720
658 write_unlock_bh(&sap->sk_list.lock); 721 spin_lock_bh(&sap->sk_lock);
722 sap->sk_count++;
723 sk_nulls_add_node_rcu(sk, laddr_hb);
724 hlist_add_head(&llc->dev_hash_node, dev_hb);
725 spin_unlock_bh(&sap->sk_lock);
659} 726}
660 727
661/** 728/**
@@ -663,14 +730,18 @@ void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
663 * @sap: SAP 730 * @sap: SAP
664 * @sk: socket 731 * @sk: socket
665 * 732 *
666 * This function removes a connection from sk_list.list of a SAP if 733 * This function removes a connection from the hash tables of a SAP if
667 * the connection was in this list. 734 * the connection was in this list.
668 */ 735 */
669void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk) 736void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk)
670{ 737{
671 write_lock_bh(&sap->sk_list.lock); 738 struct llc_sock *llc = llc_sk(sk);
672 sk_del_node_init(sk); 739
673 write_unlock_bh(&sap->sk_list.lock); 740 spin_lock_bh(&sap->sk_lock);
741 sk_nulls_del_node_init_rcu(sk);
742 hlist_del(&llc->dev_hash_node);
743 sap->sk_count--;
744 spin_unlock_bh(&sap->sk_lock);
674 llc_sap_put(sap); 745 llc_sap_put(sap);
675} 746}
676 747
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index ff4c0ab96a6..78167e81dfe 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -23,7 +23,7 @@
23#include <net/llc.h> 23#include <net/llc.h>
24 24
25LIST_HEAD(llc_sap_list); 25LIST_HEAD(llc_sap_list);
26DEFINE_RWLOCK(llc_sap_list_lock); 26DEFINE_SPINLOCK(llc_sap_list_lock);
27 27
28/** 28/**
29 * llc_sap_alloc - allocates and initializes sap. 29 * llc_sap_alloc - allocates and initializes sap.
@@ -33,40 +33,19 @@ DEFINE_RWLOCK(llc_sap_list_lock);
33static struct llc_sap *llc_sap_alloc(void) 33static struct llc_sap *llc_sap_alloc(void)
34{ 34{
35 struct llc_sap *sap = kzalloc(sizeof(*sap), GFP_ATOMIC); 35 struct llc_sap *sap = kzalloc(sizeof(*sap), GFP_ATOMIC);
36 int i;
36 37
37 if (sap) { 38 if (sap) {
38 /* sap->laddr.mac - leave as a null, it's filled by bind */ 39 /* sap->laddr.mac - leave as a null, it's filled by bind */
39 sap->state = LLC_SAP_STATE_ACTIVE; 40 sap->state = LLC_SAP_STATE_ACTIVE;
40 rwlock_init(&sap->sk_list.lock); 41 spin_lock_init(&sap->sk_lock);
42 for (i = 0; i < LLC_SK_LADDR_HASH_ENTRIES; i++)
43 INIT_HLIST_NULLS_HEAD(&sap->sk_laddr_hash[i], i);
41 atomic_set(&sap->refcnt, 1); 44 atomic_set(&sap->refcnt, 1);
42 } 45 }
43 return sap; 46 return sap;
44} 47}
45 48
46/**
47 * llc_add_sap - add sap to station list
48 * @sap: Address of the sap
49 *
50 * Adds a sap to the LLC's station sap list.
51 */
52static void llc_add_sap(struct llc_sap *sap)
53{
54 list_add_tail(&sap->node, &llc_sap_list);
55}
56
57/**
58 * llc_del_sap - del sap from station list
59 * @sap: Address of the sap
60 *
61 * Removes a sap to the LLC's station sap list.
62 */
63static void llc_del_sap(struct llc_sap *sap)
64{
65 write_lock_bh(&llc_sap_list_lock);
66 list_del(&sap->node);
67 write_unlock_bh(&llc_sap_list_lock);
68}
69
70static struct llc_sap *__llc_sap_find(unsigned char sap_value) 49static struct llc_sap *__llc_sap_find(unsigned char sap_value)
71{ 50{
72 struct llc_sap* sap; 51 struct llc_sap* sap;
@@ -90,13 +69,13 @@ out:
90 */ 69 */
91struct llc_sap *llc_sap_find(unsigned char sap_value) 70struct llc_sap *llc_sap_find(unsigned char sap_value)
92{ 71{
93 struct llc_sap* sap; 72 struct llc_sap *sap;
94 73
95 read_lock_bh(&llc_sap_list_lock); 74 rcu_read_lock_bh();
96 sap = __llc_sap_find(sap_value); 75 sap = __llc_sap_find(sap_value);
97 if (sap) 76 if (sap)
98 llc_sap_hold(sap); 77 llc_sap_hold(sap);
99 read_unlock_bh(&llc_sap_list_lock); 78 rcu_read_unlock_bh();
100 return sap; 79 return sap;
101} 80}
102 81
@@ -117,7 +96,7 @@ struct llc_sap *llc_sap_open(unsigned char lsap,
117{ 96{
118 struct llc_sap *sap = NULL; 97 struct llc_sap *sap = NULL;
119 98
120 write_lock_bh(&llc_sap_list_lock); 99 spin_lock_bh(&llc_sap_list_lock);
121 if (__llc_sap_find(lsap)) /* SAP already exists */ 100 if (__llc_sap_find(lsap)) /* SAP already exists */
122 goto out; 101 goto out;
123 sap = llc_sap_alloc(); 102 sap = llc_sap_alloc();
@@ -125,9 +104,9 @@ struct llc_sap *llc_sap_open(unsigned char lsap,
125 goto out; 104 goto out;
126 sap->laddr.lsap = lsap; 105 sap->laddr.lsap = lsap;
127 sap->rcv_func = func; 106 sap->rcv_func = func;
128 llc_add_sap(sap); 107 list_add_tail_rcu(&sap->node, &llc_sap_list);
129out: 108out:
130 write_unlock_bh(&llc_sap_list_lock); 109 spin_unlock_bh(&llc_sap_list_lock);
131 return sap; 110 return sap;
132} 111}
133 112
@@ -142,8 +121,14 @@ out:
142 */ 121 */
143void llc_sap_close(struct llc_sap *sap) 122void llc_sap_close(struct llc_sap *sap)
144{ 123{
145 WARN_ON(!hlist_empty(&sap->sk_list.list)); 124 WARN_ON(sap->sk_count);
146 llc_del_sap(sap); 125
126 spin_lock_bh(&llc_sap_list_lock);
127 list_del_rcu(&sap->node);
128 spin_unlock_bh(&llc_sap_list_lock);
129
130 synchronize_rcu();
131
147 kfree(sap); 132 kfree(sap);
148} 133}
149 134
diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
index 754f4fedc85..b38a1079a98 100644
--- a/net/llc/llc_output.c
+++ b/net/llc/llc_output.c
@@ -33,48 +33,19 @@
33int llc_mac_hdr_init(struct sk_buff *skb, 33int llc_mac_hdr_init(struct sk_buff *skb,
34 const unsigned char *sa, const unsigned char *da) 34 const unsigned char *sa, const unsigned char *da)
35{ 35{
36 int rc = 0; 36 int rc = -EINVAL;
37 37
38 switch (skb->dev->type) { 38 switch (skb->dev->type) {
39#ifdef CONFIG_TR 39 case ARPHRD_IEEE802_TR:
40 case ARPHRD_IEEE802_TR: {
41 struct net_device *dev = skb->dev;
42 struct trh_hdr *trh;
43
44 skb_push(skb, sizeof(*trh));
45 skb_reset_mac_header(skb);
46 trh = tr_hdr(skb);
47 trh->ac = AC;
48 trh->fc = LLC_FRAME;
49 if (sa)
50 memcpy(trh->saddr, sa, dev->addr_len);
51 else
52 memset(trh->saddr, 0, dev->addr_len);
53 if (da) {
54 memcpy(trh->daddr, da, dev->addr_len);
55 tr_source_route(skb, trh, dev);
56 skb_reset_mac_header(skb);
57 }
58 break;
59 }
60#endif
61 case ARPHRD_ETHER: 40 case ARPHRD_ETHER:
62 case ARPHRD_LOOPBACK: { 41 case ARPHRD_LOOPBACK:
63 unsigned short len = skb->len; 42 rc = dev_hard_header(skb, skb->dev, ETH_P_802_2, da, sa,
64 struct ethhdr *eth; 43 skb->len);
65 44 if (rc > 0)
66 skb_push(skb, sizeof(*eth)); 45 rc = 0;
67 skb_reset_mac_header(skb);
68 eth = eth_hdr(skb);
69 eth->h_proto = htons(len);
70 memcpy(eth->h_dest, da, ETH_ALEN);
71 memcpy(eth->h_source, sa, ETH_ALEN);
72 break; 46 break;
73 }
74 default: 47 default:
75 printk(KERN_WARNING "device type not supported: %d\n", 48 WARN(1, "device type not supported: %d\n", skb->dev->type);
76 skb->dev->type);
77 rc = -EINVAL;
78 } 49 }
79 return rc; 50 return rc;
80} 51}
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index be47ac427f6..7af1ff2d1f1 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -32,21 +32,23 @@ static void llc_ui_format_mac(struct seq_file *seq, u8 *addr)
32 32
33static struct sock *llc_get_sk_idx(loff_t pos) 33static struct sock *llc_get_sk_idx(loff_t pos)
34{ 34{
35 struct list_head *sap_entry;
36 struct llc_sap *sap; 35 struct llc_sap *sap;
37 struct hlist_node *node;
38 struct sock *sk = NULL; 36 struct sock *sk = NULL;
39 37 int i;
40 list_for_each(sap_entry, &llc_sap_list) { 38
41 sap = list_entry(sap_entry, struct llc_sap, node); 39 list_for_each_entry_rcu(sap, &llc_sap_list, node) {
42 40 spin_lock_bh(&sap->sk_lock);
43 read_lock_bh(&sap->sk_list.lock); 41 for (i = 0; i < LLC_SK_LADDR_HASH_ENTRIES; i++) {
44 sk_for_each(sk, node, &sap->sk_list.list) { 42 struct hlist_nulls_head *head = &sap->sk_laddr_hash[i];
45 if (!pos) 43 struct hlist_nulls_node *node;
46 goto found; 44
47 --pos; 45 sk_nulls_for_each(sk, node, head) {
46 if (!pos)
47 goto found; /* keep the lock */
48 --pos;
49 }
48 } 50 }
49 read_unlock_bh(&sap->sk_list.lock); 51 spin_unlock_bh(&sap->sk_lock);
50 } 52 }
51 sk = NULL; 53 sk = NULL;
52found: 54found:
@@ -57,10 +59,23 @@ static void *llc_seq_start(struct seq_file *seq, loff_t *pos)
57{ 59{
58 loff_t l = *pos; 60 loff_t l = *pos;
59 61
60 read_lock_bh(&llc_sap_list_lock); 62 rcu_read_lock_bh();
61 return l ? llc_get_sk_idx(--l) : SEQ_START_TOKEN; 63 return l ? llc_get_sk_idx(--l) : SEQ_START_TOKEN;
62} 64}
63 65
66static struct sock *laddr_hash_next(struct llc_sap *sap, int bucket)
67{
68 struct hlist_nulls_node *node;
69 struct sock *sk = NULL;
70
71 while (++bucket < LLC_SK_LADDR_HASH_ENTRIES)
72 sk_nulls_for_each(sk, node, &sap->sk_laddr_hash[bucket])
73 goto out;
74
75out:
76 return sk;
77}
78
64static void *llc_seq_next(struct seq_file *seq, void *v, loff_t *pos) 79static void *llc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
65{ 80{
66 struct sock* sk, *next; 81 struct sock* sk, *next;
@@ -73,25 +88,23 @@ static void *llc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
73 goto out; 88 goto out;
74 } 89 }
75 sk = v; 90 sk = v;
76 next = sk_next(sk); 91 next = sk_nulls_next(sk);
77 if (next) { 92 if (next) {
78 sk = next; 93 sk = next;
79 goto out; 94 goto out;
80 } 95 }
81 llc = llc_sk(sk); 96 llc = llc_sk(sk);
82 sap = llc->sap; 97 sap = llc->sap;
83 read_unlock_bh(&sap->sk_list.lock); 98 sk = laddr_hash_next(sap, llc_sk_laddr_hashfn(sap, &llc->laddr));
84 sk = NULL; 99 if (sk)
85 for (;;) { 100 goto out;
86 if (sap->node.next == &llc_sap_list) 101 spin_unlock_bh(&sap->sk_lock);
87 break; 102 list_for_each_entry_continue_rcu(sap, &llc_sap_list, node) {
88 sap = list_entry(sap->node.next, struct llc_sap, node); 103 spin_lock_bh(&sap->sk_lock);
89 read_lock_bh(&sap->sk_list.lock); 104 sk = laddr_hash_next(sap, -1);
90 if (!hlist_empty(&sap->sk_list.list)) { 105 if (sk)
91 sk = sk_head(&sap->sk_list.list); 106 break; /* keep the lock */
92 break; 107 spin_unlock_bh(&sap->sk_lock);
93 }
94 read_unlock_bh(&sap->sk_list.lock);
95 } 108 }
96out: 109out:
97 return sk; 110 return sk;
@@ -104,9 +117,9 @@ static void llc_seq_stop(struct seq_file *seq, void *v)
104 struct llc_sock *llc = llc_sk(sk); 117 struct llc_sock *llc = llc_sk(sk);
105 struct llc_sap *sap = llc->sap; 118 struct llc_sap *sap = llc->sap;
106 119
107 read_unlock_bh(&sap->sk_list.lock); 120 spin_unlock_bh(&sap->sk_lock);
108 } 121 }
109 read_unlock_bh(&llc_sap_list_lock); 122 rcu_read_unlock_bh();
110} 123}
111 124
112static int llc_seq_socket_show(struct seq_file *seq, void *v) 125static int llc_seq_socket_show(struct seq_file *seq, void *v)
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 008de1fc42c..ad6e6e1cf22 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -297,6 +297,17 @@ static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb,
297 llc_sap_state_process(sap, skb); 297 llc_sap_state_process(sap, skb);
298} 298}
299 299
300static inline bool llc_dgram_match(const struct llc_sap *sap,
301 const struct llc_addr *laddr,
302 const struct sock *sk)
303{
304 struct llc_sock *llc = llc_sk(sk);
305
306 return sk->sk_type == SOCK_DGRAM &&
307 llc->laddr.lsap == laddr->lsap &&
308 llc_mac_match(llc->laddr.mac, laddr->mac);
309}
310
300/** 311/**
301 * llc_lookup_dgram - Finds dgram socket for the local sap/mac 312 * llc_lookup_dgram - Finds dgram socket for the local sap/mac
302 * @sap: SAP 313 * @sap: SAP
@@ -309,25 +320,68 @@ static struct sock *llc_lookup_dgram(struct llc_sap *sap,
309 const struct llc_addr *laddr) 320 const struct llc_addr *laddr)
310{ 321{
311 struct sock *rc; 322 struct sock *rc;
312 struct hlist_node *node; 323 struct hlist_nulls_node *node;
313 324 int slot = llc_sk_laddr_hashfn(sap, laddr);
314 read_lock_bh(&sap->sk_list.lock); 325 struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
315 sk_for_each(rc, node, &sap->sk_list.list) { 326
316 struct llc_sock *llc = llc_sk(rc); 327 rcu_read_lock_bh();
317 328again:
318 if (rc->sk_type == SOCK_DGRAM && 329 sk_nulls_for_each_rcu(rc, node, laddr_hb) {
319 llc->laddr.lsap == laddr->lsap && 330 if (llc_dgram_match(sap, laddr, rc)) {
320 llc_mac_match(llc->laddr.mac, laddr->mac)) { 331 /* Extra checks required by SLAB_DESTROY_BY_RCU */
321 sock_hold(rc); 332 if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
333 goto again;
334 if (unlikely(llc_sk(rc)->sap != sap ||
335 !llc_dgram_match(sap, laddr, rc))) {
336 sock_put(rc);
337 continue;
338 }
322 goto found; 339 goto found;
323 } 340 }
324 } 341 }
325 rc = NULL; 342 rc = NULL;
343 /*
344 * if the nulls value we got at the end of this lookup is
345 * not the expected one, we must restart lookup.
346 * We probably met an item that was moved to another chain.
347 */
348 if (unlikely(get_nulls_value(node) != slot))
349 goto again;
326found: 350found:
327 read_unlock_bh(&sap->sk_list.lock); 351 rcu_read_unlock_bh();
328 return rc; 352 return rc;
329} 353}
330 354
355static inline bool llc_mcast_match(const struct llc_sap *sap,
356 const struct llc_addr *laddr,
357 const struct sk_buff *skb,
358 const struct sock *sk)
359{
360 struct llc_sock *llc = llc_sk(sk);
361
362 return sk->sk_type == SOCK_DGRAM &&
363 llc->laddr.lsap == laddr->lsap &&
364 llc->dev == skb->dev;
365}
366
367static void llc_do_mcast(struct llc_sap *sap, struct sk_buff *skb,
368 struct sock **stack, int count)
369{
370 struct sk_buff *skb1;
371 int i;
372
373 for (i = 0; i < count; i++) {
374 skb1 = skb_clone(skb, GFP_ATOMIC);
375 if (!skb1) {
376 sock_put(stack[i]);
377 continue;
378 }
379
380 llc_sap_rcv(sap, skb1, stack[i]);
381 sock_put(stack[i]);
382 }
383}
384
331/** 385/**
332 * llc_sap_mcast - Deliver multicast PDU's to all matching datagram sockets. 386 * llc_sap_mcast - Deliver multicast PDU's to all matching datagram sockets.
333 * @sap: SAP 387 * @sap: SAP
@@ -340,32 +394,31 @@ static void llc_sap_mcast(struct llc_sap *sap,
340 const struct llc_addr *laddr, 394 const struct llc_addr *laddr,
341 struct sk_buff *skb) 395 struct sk_buff *skb)
342{ 396{
343 struct sock *sk; 397 int i = 0, count = 256 / sizeof(struct sock *);
398 struct sock *sk, *stack[count];
344 struct hlist_node *node; 399 struct hlist_node *node;
400 struct llc_sock *llc;
401 struct hlist_head *dev_hb = llc_sk_dev_hash(sap, skb->dev->ifindex);
345 402
346 read_lock_bh(&sap->sk_list.lock); 403 spin_lock_bh(&sap->sk_lock);
347 sk_for_each(sk, node, &sap->sk_list.list) { 404 hlist_for_each_entry(llc, node, dev_hb, dev_hash_node) {
348 struct llc_sock *llc = llc_sk(sk);
349 struct sk_buff *skb1;
350 405
351 if (sk->sk_type != SOCK_DGRAM) 406 sk = &llc->sk;
352 continue;
353 407
354 if (llc->laddr.lsap != laddr->lsap) 408 if (!llc_mcast_match(sap, laddr, skb, sk))
355 continue; 409 continue;
356 410
357 if (llc->dev != skb->dev)
358 continue;
359
360 skb1 = skb_clone(skb, GFP_ATOMIC);
361 if (!skb1)
362 break;
363
364 sock_hold(sk); 411 sock_hold(sk);
365 llc_sap_rcv(sap, skb1, sk); 412 if (i < count)
366 sock_put(sk); 413 stack[i++] = sk;
414 else {
415 llc_do_mcast(sap, skb, stack, i);
416 i = 0;
417 }
367 } 418 }
368 read_unlock_bh(&sap->sk_list.lock); 419 spin_unlock_bh(&sap->sk_lock);
420
421 llc_do_mcast(sap, skb, stack, i);
369} 422}
370 423
371 424
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index a10d508b07e..a952b7f8c64 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -96,18 +96,6 @@ menuconfig MAC80211_DEBUG_MENU
96 ---help--- 96 ---help---
97 This option collects various mac80211 debug settings. 97 This option collects various mac80211 debug settings.
98 98
99config MAC80211_DEBUG_PACKET_ALIGNMENT
100 bool "Enable packet alignment debugging"
101 depends on MAC80211_DEBUG_MENU
102 ---help---
103 This option is recommended for driver authors and strongly
104 discouraged for everybody else, it will trigger a warning
105 when a driver hands mac80211 a buffer that is aligned in
106 a way that will cause problems with the IP stack on some
107 architectures.
108
109 Say N unless you're writing a mac80211 based driver.
110
111config MAC80211_NOINLINE 99config MAC80211_NOINLINE
112 bool "Do not inline TX/RX handlers" 100 bool "Do not inline TX/RX handlers"
113 depends on MAC80211_DEBUG_MENU 101 depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 298cfcc1bf8..04420291e7a 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -6,10 +6,10 @@ mac80211-y := \
6 sta_info.o \ 6 sta_info.o \
7 wep.o \ 7 wep.o \
8 wpa.o \ 8 wpa.o \
9 scan.o \ 9 scan.o offchannel.o \
10 ht.o agg-tx.o agg-rx.o \ 10 ht.o agg-tx.o agg-rx.o \
11 ibss.o \ 11 ibss.o \
12 mlme.o \ 12 mlme.o work.o \
13 iface.o \ 13 iface.o \
14 rate.o \ 14 rate.o \
15 michael.o \ 15 michael.o \
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 51c7dc3c4c3..a978e666ed6 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -41,8 +41,7 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
41 sta->sta.addr, tid); 41 sta->sta.addr, tid);
42#endif /* CONFIG_MAC80211_HT_DEBUG */ 42#endif /* CONFIG_MAC80211_HT_DEBUG */
43 43
44 if (drv_ampdu_action(local, &sta->sdata->vif, 44 if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
45 IEEE80211_AMPDU_RX_STOP,
46 &sta->sta, tid, NULL)) 45 &sta->sta, tid, NULL))
47 printk(KERN_DEBUG "HW problem - can not stop rx " 46 printk(KERN_DEBUG "HW problem - can not stop rx "
48 "aggregation for tid %d\n", tid); 47 "aggregation for tid %d\n", tid);
@@ -83,12 +82,11 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
83void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, 82void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid,
84 u16 initiator, u16 reason) 83 u16 initiator, u16 reason)
85{ 84{
86 struct ieee80211_local *local = sdata->local;
87 struct sta_info *sta; 85 struct sta_info *sta;
88 86
89 rcu_read_lock(); 87 rcu_read_lock();
90 88
91 sta = sta_info_get(local, ra); 89 sta = sta_info_get(sdata, ra);
92 if (!sta) { 90 if (!sta) {
93 rcu_read_unlock(); 91 rcu_read_unlock();
94 return; 92 return;
@@ -136,7 +134,7 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
136 134
137 if (!skb) { 135 if (!skb) {
138 printk(KERN_DEBUG "%s: failed to allocate buffer " 136 printk(KERN_DEBUG "%s: failed to allocate buffer "
139 "for addba resp frame\n", sdata->dev->name); 137 "for addba resp frame\n", sdata->name);
140 return; 138 return;
141 } 139 }
142 140
@@ -144,10 +142,10 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
144 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 142 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
145 memset(mgmt, 0, 24); 143 memset(mgmt, 0, 24);
146 memcpy(mgmt->da, da, ETH_ALEN); 144 memcpy(mgmt->da, da, ETH_ALEN);
147 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 145 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
148 if (sdata->vif.type == NL80211_IFTYPE_AP || 146 if (sdata->vif.type == NL80211_IFTYPE_AP ||
149 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 147 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
150 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); 148 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
151 else if (sdata->vif.type == NL80211_IFTYPE_STATION) 149 else if (sdata->vif.type == NL80211_IFTYPE_STATION)
152 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); 150 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
153 151
@@ -281,8 +279,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
281 goto end; 279 goto end;
282 } 280 }
283 281
284 ret = drv_ampdu_action(local, &sta->sdata->vif, 282 ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
285 IEEE80211_AMPDU_RX_START,
286 &sta->sta, tid, &start_seq_num); 283 &sta->sta, tid, &start_seq_num);
287#ifdef CONFIG_MAC80211_HT_DEBUG 284#ifdef CONFIG_MAC80211_HT_DEBUG
288 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret); 285 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 5e3a7eccef5..5538e1b4a69 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -58,17 +58,17 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
58 58
59 if (!skb) { 59 if (!skb) {
60 printk(KERN_ERR "%s: failed to allocate buffer " 60 printk(KERN_ERR "%s: failed to allocate buffer "
61 "for addba request frame\n", sdata->dev->name); 61 "for addba request frame\n", sdata->name);
62 return; 62 return;
63 } 63 }
64 skb_reserve(skb, local->hw.extra_tx_headroom); 64 skb_reserve(skb, local->hw.extra_tx_headroom);
65 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 65 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
66 memset(mgmt, 0, 24); 66 memset(mgmt, 0, 24);
67 memcpy(mgmt->da, da, ETH_ALEN); 67 memcpy(mgmt->da, da, ETH_ALEN);
68 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 68 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
69 if (sdata->vif.type == NL80211_IFTYPE_AP || 69 if (sdata->vif.type == NL80211_IFTYPE_AP ||
70 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 70 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
71 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); 71 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
72 else if (sdata->vif.type == NL80211_IFTYPE_STATION) 72 else if (sdata->vif.type == NL80211_IFTYPE_STATION)
73 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); 73 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
74 74
@@ -104,7 +104,7 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
104 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom); 104 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
105 if (!skb) { 105 if (!skb) {
106 printk(KERN_ERR "%s: failed to allocate buffer for " 106 printk(KERN_ERR "%s: failed to allocate buffer for "
107 "bar frame\n", sdata->dev->name); 107 "bar frame\n", sdata->name);
108 return; 108 return;
109 } 109 }
110 skb_reserve(skb, local->hw.extra_tx_headroom); 110 skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -113,7 +113,7 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
113 bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | 113 bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
114 IEEE80211_STYPE_BACK_REQ); 114 IEEE80211_STYPE_BACK_REQ);
115 memcpy(bar->ra, ra, ETH_ALEN); 115 memcpy(bar->ra, ra, ETH_ALEN);
116 memcpy(bar->ta, sdata->dev->dev_addr, ETH_ALEN); 116 memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
117 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL; 117 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
118 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA; 118 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
119 bar_control |= (u16)(tid << 12); 119 bar_control |= (u16)(tid << 12);
@@ -144,7 +144,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
144 *state = HT_AGG_STATE_REQ_STOP_BA_MSK | 144 *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
145 (initiator << HT_AGG_STATE_INITIATOR_SHIFT); 145 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
146 146
147 ret = drv_ampdu_action(local, &sta->sdata->vif, 147 ret = drv_ampdu_action(local, sta->sdata,
148 IEEE80211_AMPDU_TX_STOP, 148 IEEE80211_AMPDU_TX_STOP,
149 &sta->sta, tid, NULL); 149 &sta->sta, tid, NULL);
150 150
@@ -179,7 +179,8 @@ static void sta_addba_resp_timer_expired(unsigned long data)
179 179
180 /* check if the TID waits for addBA response */ 180 /* check if the TID waits for addBA response */
181 spin_lock_bh(&sta->lock); 181 spin_lock_bh(&sta->lock);
182 if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK)) != 182 if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK |
183 HT_AGG_STATE_REQ_STOP_BA_MSK)) !=
183 HT_ADDBA_REQUESTED_MSK) { 184 HT_ADDBA_REQUESTED_MSK) {
184 spin_unlock_bh(&sta->lock); 185 spin_unlock_bh(&sta->lock);
185 *state = HT_AGG_STATE_IDLE; 186 *state = HT_AGG_STATE_IDLE;
@@ -236,6 +237,14 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
236 sdata->vif.type != NL80211_IFTYPE_AP) 237 sdata->vif.type != NL80211_IFTYPE_AP)
237 return -EINVAL; 238 return -EINVAL;
238 239
240 if (test_sta_flags(sta, WLAN_STA_DISASSOC)) {
241#ifdef CONFIG_MAC80211_HT_DEBUG
242 printk(KERN_DEBUG "Disassociation is in progress. "
243 "Denying BA session request\n");
244#endif
245 return -EINVAL;
246 }
247
239 if (test_sta_flags(sta, WLAN_STA_SUSPEND)) { 248 if (test_sta_flags(sta, WLAN_STA_SUSPEND)) {
240#ifdef CONFIG_MAC80211_HT_DEBUG 249#ifdef CONFIG_MAC80211_HT_DEBUG
241 printk(KERN_DEBUG "Suspend in progress. " 250 printk(KERN_DEBUG "Suspend in progress. "
@@ -301,10 +310,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
301 * call back right away, it must see that the flow has begun */ 310 * call back right away, it must see that the flow has begun */
302 *state |= HT_ADDBA_REQUESTED_MSK; 311 *state |= HT_ADDBA_REQUESTED_MSK;
303 312
304 start_seq_num = sta->tid_seq[tid]; 313 start_seq_num = sta->tid_seq[tid] >> 4;
305 314
306 ret = drv_ampdu_action(local, &sdata->vif, 315 ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
307 IEEE80211_AMPDU_TX_START,
308 pubsta, tid, &start_seq_num); 316 pubsta, tid, &start_seq_num);
309 317
310 if (ret) { 318 if (ret) {
@@ -420,7 +428,7 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
420 ieee80211_agg_splice_finish(local, sta, tid); 428 ieee80211_agg_splice_finish(local, sta, tid);
421 spin_unlock(&local->ampdu_lock); 429 spin_unlock(&local->ampdu_lock);
422 430
423 drv_ampdu_action(local, &sta->sdata->vif, 431 drv_ampdu_action(local, sta->sdata,
424 IEEE80211_AMPDU_TX_OPERATIONAL, 432 IEEE80211_AMPDU_TX_OPERATIONAL,
425 &sta->sta, tid, NULL); 433 &sta->sta, tid, NULL);
426} 434}
@@ -441,7 +449,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
441 } 449 }
442 450
443 rcu_read_lock(); 451 rcu_read_lock();
444 sta = sta_info_get(local, ra); 452 sta = sta_info_get(sdata, ra);
445 if (!sta) { 453 if (!sta) {
446 rcu_read_unlock(); 454 rcu_read_unlock();
447#ifdef CONFIG_MAC80211_HT_DEBUG 455#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -489,7 +497,7 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
489#ifdef CONFIG_MAC80211_HT_DEBUG 497#ifdef CONFIG_MAC80211_HT_DEBUG
490 if (net_ratelimit()) 498 if (net_ratelimit())
491 printk(KERN_WARNING "%s: Not enough memory, " 499 printk(KERN_WARNING "%s: Not enough memory, "
492 "dropping start BA session", skb->dev->name); 500 "dropping start BA session", sdata->name);
493#endif 501#endif
494 return; 502 return;
495 } 503 }
@@ -564,7 +572,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
564#endif /* CONFIG_MAC80211_HT_DEBUG */ 572#endif /* CONFIG_MAC80211_HT_DEBUG */
565 573
566 rcu_read_lock(); 574 rcu_read_lock();
567 sta = sta_info_get(local, ra); 575 sta = sta_info_get(sdata, ra);
568 if (!sta) { 576 if (!sta) {
569#ifdef CONFIG_MAC80211_HT_DEBUG 577#ifdef CONFIG_MAC80211_HT_DEBUG
570 printk(KERN_DEBUG "Could not find station: %pM\n", ra); 578 printk(KERN_DEBUG "Could not find station: %pM\n", ra);
@@ -621,7 +629,7 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
621#ifdef CONFIG_MAC80211_HT_DEBUG 629#ifdef CONFIG_MAC80211_HT_DEBUG
622 if (net_ratelimit()) 630 if (net_ratelimit())
623 printk(KERN_WARNING "%s: Not enough memory, " 631 printk(KERN_WARNING "%s: Not enough memory, "
624 "dropping stop BA session", skb->dev->name); 632 "dropping stop BA session", sdata->name);
625#endif 633#endif
626 return; 634 return;
627 } 635 }
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 9ae1a4760b5..e1731b7c252 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -78,17 +78,15 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
78 enum nl80211_iftype type, u32 *flags, 78 enum nl80211_iftype type, u32 *flags,
79 struct vif_params *params) 79 struct vif_params *params)
80{ 80{
81 struct ieee80211_sub_if_data *sdata; 81 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
82 int ret; 82 int ret;
83 83
84 if (netif_running(dev)) 84 if (ieee80211_sdata_running(sdata))
85 return -EBUSY; 85 return -EBUSY;
86 86
87 if (!nl80211_params_check(type, params)) 87 if (!nl80211_params_check(type, params))
88 return -EINVAL; 88 return -EINVAL;
89 89
90 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
91
92 ret = ieee80211_if_change_type(sdata, type); 90 ret = ieee80211_if_change_type(sdata, type);
93 if (ret) 91 if (ret)
94 return ret; 92 return ret;
@@ -150,7 +148,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
150 rcu_read_lock(); 148 rcu_read_lock();
151 149
152 if (mac_addr) { 150 if (mac_addr) {
153 sta = sta_info_get(sdata->local, mac_addr); 151 sta = sta_info_get_bss(sdata, mac_addr);
154 if (!sta) { 152 if (!sta) {
155 ieee80211_key_free(key); 153 ieee80211_key_free(key);
156 err = -ENOENT; 154 err = -ENOENT;
@@ -181,7 +179,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
181 if (mac_addr) { 179 if (mac_addr) {
182 ret = -ENOENT; 180 ret = -ENOENT;
183 181
184 sta = sta_info_get(sdata->local, mac_addr); 182 sta = sta_info_get_bss(sdata, mac_addr);
185 if (!sta) 183 if (!sta)
186 goto out_unlock; 184 goto out_unlock;
187 185
@@ -228,7 +226,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
228 rcu_read_lock(); 226 rcu_read_lock();
229 227
230 if (mac_addr) { 228 if (mac_addr) {
231 sta = sta_info_get(sdata->local, mac_addr); 229 sta = sta_info_get_bss(sdata, mac_addr);
232 if (!sta) 230 if (!sta)
233 goto out; 231 goto out;
234 232
@@ -415,15 +413,13 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
415static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev, 413static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
416 u8 *mac, struct station_info *sinfo) 414 u8 *mac, struct station_info *sinfo)
417{ 415{
418 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 416 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
419 struct sta_info *sta; 417 struct sta_info *sta;
420 int ret = -ENOENT; 418 int ret = -ENOENT;
421 419
422 rcu_read_lock(); 420 rcu_read_lock();
423 421
424 /* XXX: verify sta->dev == dev */ 422 sta = sta_info_get_bss(sdata, mac);
425
426 sta = sta_info_get(local, mac);
427 if (sta) { 423 if (sta) {
428 ret = 0; 424 ret = 0;
429 sta_set_sinfo(sta, sinfo); 425 sta_set_sinfo(sta, sinfo);
@@ -519,6 +515,8 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
519 if (old) 515 if (old)
520 memcpy(new->tail, old->tail, new_tail_len); 516 memcpy(new->tail, old->tail, new_tail_len);
521 517
518 sdata->vif.bss_conf.dtim_period = new->dtim_period;
519
522 rcu_assign_pointer(sdata->u.ap.beacon, new); 520 rcu_assign_pointer(sdata->u.ap.beacon, new);
523 521
524 synchronize_rcu(); 522 synchronize_rcu();
@@ -732,7 +730,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
732 } else 730 } else
733 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 731 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
734 732
735 if (compare_ether_addr(mac, dev->dev_addr) == 0) 733 if (compare_ether_addr(mac, sdata->vif.addr) == 0)
736 return -EINVAL; 734 return -EINVAL;
737 735
738 if (is_multicast_ether_addr(mac)) 736 if (is_multicast_ether_addr(mac))
@@ -751,9 +749,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
751 layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN || 749 layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
752 sdata->vif.type == NL80211_IFTYPE_AP; 750 sdata->vif.type == NL80211_IFTYPE_AP;
753 751
754 rcu_read_lock(); 752 err = sta_info_insert_rcu(sta);
755
756 err = sta_info_insert(sta);
757 if (err) { 753 if (err) {
758 rcu_read_unlock(); 754 rcu_read_unlock();
759 return err; 755 return err;
@@ -772,27 +768,13 @@ static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
772{ 768{
773 struct ieee80211_local *local = wiphy_priv(wiphy); 769 struct ieee80211_local *local = wiphy_priv(wiphy);
774 struct ieee80211_sub_if_data *sdata; 770 struct ieee80211_sub_if_data *sdata;
775 struct sta_info *sta;
776 771
777 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 772 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
778 773
779 if (mac) { 774 if (mac)
780 rcu_read_lock(); 775 return sta_info_destroy_addr_bss(sdata, mac);
781
782 /* XXX: get sta belonging to dev */
783 sta = sta_info_get(local, mac);
784 if (!sta) {
785 rcu_read_unlock();
786 return -ENOENT;
787 }
788
789 sta_info_unlink(&sta);
790 rcu_read_unlock();
791
792 sta_info_destroy(sta);
793 } else
794 sta_info_flush(local, sdata);
795 776
777 sta_info_flush(local, sdata);
796 return 0; 778 return 0;
797} 779}
798 780
@@ -801,14 +783,14 @@ static int ieee80211_change_station(struct wiphy *wiphy,
801 u8 *mac, 783 u8 *mac,
802 struct station_parameters *params) 784 struct station_parameters *params)
803{ 785{
786 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
804 struct ieee80211_local *local = wiphy_priv(wiphy); 787 struct ieee80211_local *local = wiphy_priv(wiphy);
805 struct sta_info *sta; 788 struct sta_info *sta;
806 struct ieee80211_sub_if_data *vlansdata; 789 struct ieee80211_sub_if_data *vlansdata;
807 790
808 rcu_read_lock(); 791 rcu_read_lock();
809 792
810 /* XXX: get sta belonging to dev */ 793 sta = sta_info_get_bss(sdata, mac);
811 sta = sta_info_get(local, mac);
812 if (!sta) { 794 if (!sta) {
813 rcu_read_unlock(); 795 rcu_read_unlock();
814 return -ENOENT; 796 return -ENOENT;
@@ -847,7 +829,6 @@ static int ieee80211_change_station(struct wiphy *wiphy,
847static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev, 829static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
848 u8 *dst, u8 *next_hop) 830 u8 *dst, u8 *next_hop)
849{ 831{
850 struct ieee80211_local *local = wiphy_priv(wiphy);
851 struct ieee80211_sub_if_data *sdata; 832 struct ieee80211_sub_if_data *sdata;
852 struct mesh_path *mpath; 833 struct mesh_path *mpath;
853 struct sta_info *sta; 834 struct sta_info *sta;
@@ -856,7 +837,7 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
856 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 837 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
857 838
858 rcu_read_lock(); 839 rcu_read_lock();
859 sta = sta_info_get(local, next_hop); 840 sta = sta_info_get(sdata, next_hop);
860 if (!sta) { 841 if (!sta) {
861 rcu_read_unlock(); 842 rcu_read_unlock();
862 return -ENOENT; 843 return -ENOENT;
@@ -895,7 +876,6 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
895 struct net_device *dev, 876 struct net_device *dev,
896 u8 *dst, u8 *next_hop) 877 u8 *dst, u8 *next_hop)
897{ 878{
898 struct ieee80211_local *local = wiphy_priv(wiphy);
899 struct ieee80211_sub_if_data *sdata; 879 struct ieee80211_sub_if_data *sdata;
900 struct mesh_path *mpath; 880 struct mesh_path *mpath;
901 struct sta_info *sta; 881 struct sta_info *sta;
@@ -904,7 +884,7 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
904 884
905 rcu_read_lock(); 885 rcu_read_lock();
906 886
907 sta = sta_info_get(local, next_hop); 887 sta = sta_info_get(sdata, next_hop);
908 if (!sta) { 888 if (!sta) {
909 rcu_read_unlock(); 889 rcu_read_unlock();
910 return -ENOENT; 890 return -ENOENT;
@@ -1092,6 +1072,13 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
1092 params->use_short_preamble; 1072 params->use_short_preamble;
1093 changed |= BSS_CHANGED_ERP_PREAMBLE; 1073 changed |= BSS_CHANGED_ERP_PREAMBLE;
1094 } 1074 }
1075
1076 if (!sdata->vif.bss_conf.use_short_slot &&
1077 sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ) {
1078 sdata->vif.bss_conf.use_short_slot = true;
1079 changed |= BSS_CHANGED_ERP_SLOT;
1080 }
1081
1095 if (params->use_short_slot_time >= 0) { 1082 if (params->use_short_slot_time >= 0) {
1096 sdata->vif.bss_conf.use_short_slot = 1083 sdata->vif.bss_conf.use_short_slot =
1097 params->use_short_slot_time; 1084 params->use_short_slot_time;
@@ -1135,6 +1122,13 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
1135 p.cw_max = params->cwmax; 1122 p.cw_max = params->cwmax;
1136 p.cw_min = params->cwmin; 1123 p.cw_min = params->cwmin;
1137 p.txop = params->txop; 1124 p.txop = params->txop;
1125
1126 /*
1127 * Setting tx queue params disables u-apsd because it's only
1128 * called in master mode.
1129 */
1130 p.uapsd = false;
1131
1138 if (drv_conf_tx(local, params->queue, &p)) { 1132 if (drv_conf_tx(local, params->queue, &p)) {
1139 printk(KERN_DEBUG "%s: failed to set TX queue " 1133 printk(KERN_DEBUG "%s: failed to set TX queue "
1140 "parameters for queue %d\n", 1134 "parameters for queue %d\n",
@@ -1237,6 +1231,13 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
1237 struct ieee80211_local *local = wiphy_priv(wiphy); 1231 struct ieee80211_local *local = wiphy_priv(wiphy);
1238 int err; 1232 int err;
1239 1233
1234 if (changed & WIPHY_PARAM_COVERAGE_CLASS) {
1235 err = drv_set_coverage_class(local, wiphy->coverage_class);
1236
1237 if (err)
1238 return err;
1239 }
1240
1240 if (changed & WIPHY_PARAM_RTS_THRESHOLD) { 1241 if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
1241 err = drv_set_rts_threshold(local, wiphy->rts_threshold); 1242 err = drv_set_rts_threshold(local, wiphy->rts_threshold);
1242 1243
@@ -1324,6 +1325,50 @@ static int ieee80211_testmode_cmd(struct wiphy *wiphy, void *data, int len)
1324} 1325}
1325#endif 1326#endif
1326 1327
1328int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
1329 enum ieee80211_smps_mode smps_mode)
1330{
1331 const u8 *ap;
1332 enum ieee80211_smps_mode old_req;
1333 int err;
1334
1335 old_req = sdata->u.mgd.req_smps;
1336 sdata->u.mgd.req_smps = smps_mode;
1337
1338 if (old_req == smps_mode &&
1339 smps_mode != IEEE80211_SMPS_AUTOMATIC)
1340 return 0;
1341
1342 /*
1343 * If not associated, or current association is not an HT
1344 * association, there's no need to send an action frame.
1345 */
1346 if (!sdata->u.mgd.associated ||
1347 sdata->local->oper_channel_type == NL80211_CHAN_NO_HT) {
1348 mutex_lock(&sdata->local->iflist_mtx);
1349 ieee80211_recalc_smps(sdata->local, sdata);
1350 mutex_unlock(&sdata->local->iflist_mtx);
1351 return 0;
1352 }
1353
1354 ap = sdata->u.mgd.associated->bssid;
1355
1356 if (smps_mode == IEEE80211_SMPS_AUTOMATIC) {
1357 if (sdata->u.mgd.powersave)
1358 smps_mode = IEEE80211_SMPS_DYNAMIC;
1359 else
1360 smps_mode = IEEE80211_SMPS_OFF;
1361 }
1362
1363 /* send SM PS frame to AP */
1364 err = ieee80211_send_smps_action(sdata, smps_mode,
1365 ap, ap);
1366 if (err)
1367 sdata->u.mgd.req_smps = old_req;
1368
1369 return err;
1370}
1371
1327static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, 1372static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
1328 bool enabled, int timeout) 1373 bool enabled, int timeout)
1329{ 1374{
@@ -1344,6 +1389,11 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
1344 sdata->u.mgd.powersave = enabled; 1389 sdata->u.mgd.powersave = enabled;
1345 conf->dynamic_ps_timeout = timeout; 1390 conf->dynamic_ps_timeout = timeout;
1346 1391
1392 /* no change, but if automatic follow powersave */
1393 mutex_lock(&sdata->u.mgd.mtx);
1394 __ieee80211_request_smps(sdata, sdata->u.mgd.req_smps);
1395 mutex_unlock(&sdata->u.mgd.mtx);
1396
1347 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS) 1397 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
1348 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); 1398 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
1349 1399
@@ -1359,39 +1409,43 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
1359{ 1409{
1360 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1410 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1361 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1411 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1362 int i, err = -EINVAL; 1412 int i;
1363 u32 target_rate;
1364 struct ieee80211_supported_band *sband;
1365 1413
1366 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1414 /*
1415 * This _could_ be supported by providing a hook for
1416 * drivers for this function, but at this point it
1417 * doesn't seem worth bothering.
1418 */
1419 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
1420 return -EOPNOTSUPP;
1367 1421
1368 /* target_rate = -1, rate->fixed = 0 means auto only, so use all rates
1369 * target_rate = X, rate->fixed = 1 means only rate X
1370 * target_rate = X, rate->fixed = 0 means all rates <= X */
1371 sdata->max_ratectrl_rateidx = -1;
1372 sdata->force_unicast_rateidx = -1;
1373 1422
1374 if (mask->fixed) 1423 for (i = 0; i < IEEE80211_NUM_BANDS; i++)
1375 target_rate = mask->fixed / 100; 1424 sdata->rc_rateidx_mask[i] = mask->control[i].legacy;
1376 else if (mask->maxrate)
1377 target_rate = mask->maxrate / 100;
1378 else
1379 return 0;
1380 1425
1381 for (i=0; i< sband->n_bitrates; i++) { 1426 return 0;
1382 struct ieee80211_rate *brate = &sband->bitrates[i]; 1427}
1383 int this_rate = brate->bitrate;
1384 1428
1385 if (target_rate == this_rate) { 1429static int ieee80211_remain_on_channel(struct wiphy *wiphy,
1386 sdata->max_ratectrl_rateidx = i; 1430 struct net_device *dev,
1387 if (mask->fixed) 1431 struct ieee80211_channel *chan,
1388 sdata->force_unicast_rateidx = i; 1432 enum nl80211_channel_type channel_type,
1389 err = 0; 1433 unsigned int duration,
1390 break; 1434 u64 *cookie)
1391 } 1435{
1392 } 1436 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1393 1437
1394 return err; 1438 return ieee80211_wk_remain_on_channel(sdata, chan, channel_type,
1439 duration, cookie);
1440}
1441
1442static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
1443 struct net_device *dev,
1444 u64 cookie)
1445{
1446 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1447
1448 return ieee80211_wk_cancel_remain_on_channel(sdata, cookie);
1395} 1449}
1396 1450
1397struct cfg80211_ops mac80211_config_ops = { 1451struct cfg80211_ops mac80211_config_ops = {
@@ -1440,4 +1494,6 @@ struct cfg80211_ops mac80211_config_ops = {
1440 CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd) 1494 CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd)
1441 .set_power_mgmt = ieee80211_set_power_mgmt, 1495 .set_power_mgmt = ieee80211_set_power_mgmt,
1442 .set_bitrate_mask = ieee80211_set_bitrate_mask, 1496 .set_bitrate_mask = ieee80211_set_bitrate_mask,
1497 .remain_on_channel = ieee80211_remain_on_channel,
1498 .cancel_remain_on_channel = ieee80211_cancel_remain_on_channel,
1443}; 1499};
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index e4b54093d41..637929b65cc 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -158,6 +158,130 @@ static const struct file_operations noack_ops = {
158 .open = mac80211_open_file_generic 158 .open = mac80211_open_file_generic
159}; 159};
160 160
161static ssize_t uapsd_queues_read(struct file *file, char __user *user_buf,
162 size_t count, loff_t *ppos)
163{
164 struct ieee80211_local *local = file->private_data;
165 int res;
166 char buf[10];
167
168 res = scnprintf(buf, sizeof(buf), "0x%x\n", local->uapsd_queues);
169
170 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
171}
172
173static ssize_t uapsd_queues_write(struct file *file,
174 const char __user *user_buf,
175 size_t count, loff_t *ppos)
176{
177 struct ieee80211_local *local = file->private_data;
178 unsigned long val;
179 char buf[10];
180 size_t len;
181 int ret;
182
183 len = min(count, sizeof(buf) - 1);
184 if (copy_from_user(buf, user_buf, len))
185 return -EFAULT;
186 buf[len] = '\0';
187
188 ret = strict_strtoul(buf, 0, &val);
189
190 if (ret)
191 return -EINVAL;
192
193 if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
194 return -ERANGE;
195
196 local->uapsd_queues = val;
197
198 return count;
199}
200
201static const struct file_operations uapsd_queues_ops = {
202 .read = uapsd_queues_read,
203 .write = uapsd_queues_write,
204 .open = mac80211_open_file_generic
205};
206
207static ssize_t uapsd_max_sp_len_read(struct file *file, char __user *user_buf,
208 size_t count, loff_t *ppos)
209{
210 struct ieee80211_local *local = file->private_data;
211 int res;
212 char buf[10];
213
214 res = scnprintf(buf, sizeof(buf), "0x%x\n", local->uapsd_max_sp_len);
215
216 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
217}
218
219static ssize_t uapsd_max_sp_len_write(struct file *file,
220 const char __user *user_buf,
221 size_t count, loff_t *ppos)
222{
223 struct ieee80211_local *local = file->private_data;
224 unsigned long val;
225 char buf[10];
226 size_t len;
227 int ret;
228
229 len = min(count, sizeof(buf) - 1);
230 if (copy_from_user(buf, user_buf, len))
231 return -EFAULT;
232 buf[len] = '\0';
233
234 ret = strict_strtoul(buf, 0, &val);
235
236 if (ret)
237 return -EINVAL;
238
239 if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
240 return -ERANGE;
241
242 local->uapsd_max_sp_len = val;
243
244 return count;
245}
246
247static const struct file_operations uapsd_max_sp_len_ops = {
248 .read = uapsd_max_sp_len_read,
249 .write = uapsd_max_sp_len_write,
250 .open = mac80211_open_file_generic
251};
252
253static ssize_t channel_type_read(struct file *file, char __user *user_buf,
254 size_t count, loff_t *ppos)
255{
256 struct ieee80211_local *local = file->private_data;
257 const char *buf;
258
259 switch (local->hw.conf.channel_type) {
260 case NL80211_CHAN_NO_HT:
261 buf = "no ht\n";
262 break;
263 case NL80211_CHAN_HT20:
264 buf = "ht20\n";
265 break;
266 case NL80211_CHAN_HT40MINUS:
267 buf = "ht40-\n";
268 break;
269 case NL80211_CHAN_HT40PLUS:
270 buf = "ht40+\n";
271 break;
272 default:
273 buf = "???";
274 break;
275 }
276
277 return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
278}
279
280static const struct file_operations channel_type_ops = {
281 .read = channel_type_read,
282 .open = mac80211_open_file_generic
283};
284
161static ssize_t queues_read(struct file *file, char __user *user_buf, 285static ssize_t queues_read(struct file *file, char __user *user_buf,
162 size_t count, loff_t *ppos) 286 size_t count, loff_t *ppos)
163{ 287{
@@ -314,6 +438,9 @@ void debugfs_hw_add(struct ieee80211_local *local)
314 DEBUGFS_ADD(queues); 438 DEBUGFS_ADD(queues);
315 DEBUGFS_ADD_MODE(reset, 0200); 439 DEBUGFS_ADD_MODE(reset, 0200);
316 DEBUGFS_ADD(noack); 440 DEBUGFS_ADD(noack);
441 DEBUGFS_ADD(uapsd_queues);
442 DEBUGFS_ADD(uapsd_max_sp_len);
443 DEBUGFS_ADD(channel_type);
317 444
318 statsd = debugfs_create_dir("statistics", phyd); 445 statsd = debugfs_create_dir("statistics", phyd);
319 446
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index e0f5224630d..d12e743cb4e 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -56,7 +56,7 @@ KEY_CONF_FILE(keyidx, D);
56KEY_CONF_FILE(hw_key_idx, D); 56KEY_CONF_FILE(hw_key_idx, D);
57KEY_FILE(flags, X); 57KEY_FILE(flags, X);
58KEY_FILE(tx_rx_count, D); 58KEY_FILE(tx_rx_count, D);
59KEY_READ(ifindex, sdata->dev->ifindex, 20, "%d\n"); 59KEY_READ(ifindex, sdata->name, IFNAMSIZ + 2, "%s\n");
60KEY_OPS(ifindex); 60KEY_OPS(ifindex);
61 61
62static ssize_t key_algorithm_read(struct file *file, 62static ssize_t key_algorithm_read(struct file *file,
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 472b2039906..9affe2cd185 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -41,6 +41,30 @@ static ssize_t ieee80211_if_read(
41 return ret; 41 return ret;
42} 42}
43 43
44static ssize_t ieee80211_if_write(
45 struct ieee80211_sub_if_data *sdata,
46 const char __user *userbuf,
47 size_t count, loff_t *ppos,
48 ssize_t (*write)(struct ieee80211_sub_if_data *, const char *, int))
49{
50 u8 *buf;
51 ssize_t ret = -ENODEV;
52
53 buf = kzalloc(count, GFP_KERNEL);
54 if (!buf)
55 return -ENOMEM;
56
57 if (copy_from_user(buf, userbuf, count))
58 return -EFAULT;
59
60 rtnl_lock();
61 if (sdata->dev->reg_state == NETREG_REGISTERED)
62 ret = (*write)(sdata, buf, count);
63 rtnl_unlock();
64
65 return ret;
66}
67
44#define IEEE80211_IF_FMT(name, field, format_string) \ 68#define IEEE80211_IF_FMT(name, field, format_string) \
45static ssize_t ieee80211_if_fmt_##name( \ 69static ssize_t ieee80211_if_fmt_##name( \
46 const struct ieee80211_sub_if_data *sdata, char *buf, \ 70 const struct ieee80211_sub_if_data *sdata, char *buf, \
@@ -71,7 +95,7 @@ static ssize_t ieee80211_if_fmt_##name( \
71 return scnprintf(buf, buflen, "%pM\n", sdata->field); \ 95 return scnprintf(buf, buflen, "%pM\n", sdata->field); \
72} 96}
73 97
74#define __IEEE80211_IF_FILE(name) \ 98#define __IEEE80211_IF_FILE(name, _write) \
75static ssize_t ieee80211_if_read_##name(struct file *file, \ 99static ssize_t ieee80211_if_read_##name(struct file *file, \
76 char __user *userbuf, \ 100 char __user *userbuf, \
77 size_t count, loff_t *ppos) \ 101 size_t count, loff_t *ppos) \
@@ -82,22 +106,99 @@ static ssize_t ieee80211_if_read_##name(struct file *file, \
82} \ 106} \
83static const struct file_operations name##_ops = { \ 107static const struct file_operations name##_ops = { \
84 .read = ieee80211_if_read_##name, \ 108 .read = ieee80211_if_read_##name, \
109 .write = (_write), \
85 .open = mac80211_open_file_generic, \ 110 .open = mac80211_open_file_generic, \
86} 111}
87 112
113#define __IEEE80211_IF_FILE_W(name) \
114static ssize_t ieee80211_if_write_##name(struct file *file, \
115 const char __user *userbuf, \
116 size_t count, loff_t *ppos) \
117{ \
118 return ieee80211_if_write(file->private_data, userbuf, count, \
119 ppos, ieee80211_if_parse_##name); \
120} \
121__IEEE80211_IF_FILE(name, ieee80211_if_write_##name)
122
123
88#define IEEE80211_IF_FILE(name, field, format) \ 124#define IEEE80211_IF_FILE(name, field, format) \
89 IEEE80211_IF_FMT_##format(name, field) \ 125 IEEE80211_IF_FMT_##format(name, field) \
90 __IEEE80211_IF_FILE(name) 126 __IEEE80211_IF_FILE(name, NULL)
91 127
92/* common attributes */ 128/* common attributes */
93IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC); 129IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC);
94IEEE80211_IF_FILE(force_unicast_rateidx, force_unicast_rateidx, DEC); 130IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ],
95IEEE80211_IF_FILE(max_ratectrl_rateidx, max_ratectrl_rateidx, DEC); 131 HEX);
132IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
133 HEX);
96 134
97/* STA attributes */ 135/* STA attributes */
98IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC); 136IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
99IEEE80211_IF_FILE(aid, u.mgd.aid, DEC); 137IEEE80211_IF_FILE(aid, u.mgd.aid, DEC);
100IEEE80211_IF_FILE(capab, u.mgd.capab, HEX); 138
139static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
140 enum ieee80211_smps_mode smps_mode)
141{
142 struct ieee80211_local *local = sdata->local;
143 int err;
144
145 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS) &&
146 smps_mode == IEEE80211_SMPS_STATIC)
147 return -EINVAL;
148
149 /* auto should be dynamic if in PS mode */
150 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS) &&
151 (smps_mode == IEEE80211_SMPS_DYNAMIC ||
152 smps_mode == IEEE80211_SMPS_AUTOMATIC))
153 return -EINVAL;
154
155 /* supported only on managed interfaces for now */
156 if (sdata->vif.type != NL80211_IFTYPE_STATION)
157 return -EOPNOTSUPP;
158
159 mutex_lock(&local->iflist_mtx);
160 err = __ieee80211_request_smps(sdata, smps_mode);
161 mutex_unlock(&local->iflist_mtx);
162
163 return err;
164}
165
166static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
167 [IEEE80211_SMPS_AUTOMATIC] = "auto",
168 [IEEE80211_SMPS_OFF] = "off",
169 [IEEE80211_SMPS_STATIC] = "static",
170 [IEEE80211_SMPS_DYNAMIC] = "dynamic",
171};
172
173static ssize_t ieee80211_if_fmt_smps(const struct ieee80211_sub_if_data *sdata,
174 char *buf, int buflen)
175{
176 if (sdata->vif.type != NL80211_IFTYPE_STATION)
177 return -EOPNOTSUPP;
178
179 return snprintf(buf, buflen, "request: %s\nused: %s\n",
180 smps_modes[sdata->u.mgd.req_smps],
181 smps_modes[sdata->u.mgd.ap_smps]);
182}
183
184static ssize_t ieee80211_if_parse_smps(struct ieee80211_sub_if_data *sdata,
185 const char *buf, int buflen)
186{
187 enum ieee80211_smps_mode mode;
188
189 for (mode = 0; mode < IEEE80211_SMPS_NUM_MODES; mode++) {
190 if (strncmp(buf, smps_modes[mode], buflen) == 0) {
191 int err = ieee80211_set_smps(sdata, mode);
192 if (!err)
193 return buflen;
194 return err;
195 }
196 }
197
198 return -EINVAL;
199}
200
201__IEEE80211_IF_FILE_W(smps);
101 202
102/* AP attributes */ 203/* AP attributes */
103IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); 204IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
@@ -109,7 +210,7 @@ static ssize_t ieee80211_if_fmt_num_buffered_multicast(
109 return scnprintf(buf, buflen, "%u\n", 210 return scnprintf(buf, buflen, "%u\n",
110 skb_queue_len(&sdata->u.ap.ps_bc_buf)); 211 skb_queue_len(&sdata->u.ap.ps_bc_buf));
111} 212}
112__IEEE80211_IF_FILE(num_buffered_multicast); 213__IEEE80211_IF_FILE(num_buffered_multicast, NULL);
113 214
114/* WDS attributes */ 215/* WDS attributes */
115IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC); 216IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC);
@@ -154,46 +255,50 @@ IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
154#endif 255#endif
155 256
156 257
157#define DEBUGFS_ADD(name, type) \ 258#define DEBUGFS_ADD(name) \
158 debugfs_create_file(#name, 0400, sdata->debugfs.dir, \ 259 debugfs_create_file(#name, 0400, sdata->debugfs.dir, \
159 sdata, &name##_ops); 260 sdata, &name##_ops);
160 261
262#define DEBUGFS_ADD_MODE(name, mode) \
263 debugfs_create_file(#name, mode, sdata->debugfs.dir, \
264 sdata, &name##_ops);
265
161static void add_sta_files(struct ieee80211_sub_if_data *sdata) 266static void add_sta_files(struct ieee80211_sub_if_data *sdata)
162{ 267{
163 DEBUGFS_ADD(drop_unencrypted, sta); 268 DEBUGFS_ADD(drop_unencrypted);
164 DEBUGFS_ADD(force_unicast_rateidx, sta); 269 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
165 DEBUGFS_ADD(max_ratectrl_rateidx, sta); 270 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
166 271
167 DEBUGFS_ADD(bssid, sta); 272 DEBUGFS_ADD(bssid);
168 DEBUGFS_ADD(aid, sta); 273 DEBUGFS_ADD(aid);
169 DEBUGFS_ADD(capab, sta); 274 DEBUGFS_ADD_MODE(smps, 0600);
170} 275}
171 276
172static void add_ap_files(struct ieee80211_sub_if_data *sdata) 277static void add_ap_files(struct ieee80211_sub_if_data *sdata)
173{ 278{
174 DEBUGFS_ADD(drop_unencrypted, ap); 279 DEBUGFS_ADD(drop_unencrypted);
175 DEBUGFS_ADD(force_unicast_rateidx, ap); 280 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
176 DEBUGFS_ADD(max_ratectrl_rateidx, ap); 281 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
177 282
178 DEBUGFS_ADD(num_sta_ps, ap); 283 DEBUGFS_ADD(num_sta_ps);
179 DEBUGFS_ADD(dtim_count, ap); 284 DEBUGFS_ADD(dtim_count);
180 DEBUGFS_ADD(num_buffered_multicast, ap); 285 DEBUGFS_ADD(num_buffered_multicast);
181} 286}
182 287
183static void add_wds_files(struct ieee80211_sub_if_data *sdata) 288static void add_wds_files(struct ieee80211_sub_if_data *sdata)
184{ 289{
185 DEBUGFS_ADD(drop_unencrypted, wds); 290 DEBUGFS_ADD(drop_unencrypted);
186 DEBUGFS_ADD(force_unicast_rateidx, wds); 291 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
187 DEBUGFS_ADD(max_ratectrl_rateidx, wds); 292 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
188 293
189 DEBUGFS_ADD(peer, wds); 294 DEBUGFS_ADD(peer);
190} 295}
191 296
192static void add_vlan_files(struct ieee80211_sub_if_data *sdata) 297static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
193{ 298{
194 DEBUGFS_ADD(drop_unencrypted, vlan); 299 DEBUGFS_ADD(drop_unencrypted);
195 DEBUGFS_ADD(force_unicast_rateidx, vlan); 300 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
196 DEBUGFS_ADD(max_ratectrl_rateidx, vlan); 301 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
197} 302}
198 303
199static void add_monitor_files(struct ieee80211_sub_if_data *sdata) 304static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
@@ -280,16 +385,11 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
280 } 385 }
281} 386}
282 387
283static int notif_registered;
284
285void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata) 388void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata)
286{ 389{
287 char buf[10+IFNAMSIZ]; 390 char buf[10+IFNAMSIZ];
288 391
289 if (!notif_registered) 392 sprintf(buf, "netdev:%s", sdata->name);
290 return;
291
292 sprintf(buf, "netdev:%s", sdata->dev->name);
293 sdata->debugfs.dir = debugfs_create_dir(buf, 393 sdata->debugfs.dir = debugfs_create_dir(buf,
294 sdata->local->hw.wiphy->debugfsdir); 394 sdata->local->hw.wiphy->debugfsdir);
295 add_files(sdata); 395 add_files(sdata);
@@ -304,58 +404,18 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
304 sdata->debugfs.dir = NULL; 404 sdata->debugfs.dir = NULL;
305} 405}
306 406
307static int netdev_notify(struct notifier_block *nb, 407void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
308 unsigned long state,
309 void *ndev)
310{ 408{
311 struct net_device *dev = ndev;
312 struct dentry *dir; 409 struct dentry *dir;
313 struct ieee80211_sub_if_data *sdata; 410 char buf[10 + IFNAMSIZ];
314 char buf[10+IFNAMSIZ];
315
316 if (state != NETDEV_CHANGENAME)
317 return 0;
318
319 if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
320 return 0;
321
322 if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
323 return 0;
324
325 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
326 411
327 dir = sdata->debugfs.dir; 412 dir = sdata->debugfs.dir;
328 413
329 if (!dir) 414 if (!dir)
330 return 0; 415 return;
331 416
332 sprintf(buf, "netdev:%s", dev->name); 417 sprintf(buf, "netdev:%s", sdata->name);
333 if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf)) 418 if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf))
334 printk(KERN_ERR "mac80211: debugfs: failed to rename debugfs " 419 printk(KERN_ERR "mac80211: debugfs: failed to rename debugfs "
335 "dir to %s\n", buf); 420 "dir to %s\n", buf);
336
337 return 0;
338}
339
340static struct notifier_block mac80211_debugfs_netdev_notifier = {
341 .notifier_call = netdev_notify,
342};
343
344void ieee80211_debugfs_netdev_init(void)
345{
346 int err;
347
348 err = register_netdevice_notifier(&mac80211_debugfs_netdev_notifier);
349 if (err) {
350 printk(KERN_ERR
351 "mac80211: failed to install netdev notifier,"
352 " disabling per-netdev debugfs!\n");
353 } else
354 notif_registered = 1;
355}
356
357void ieee80211_debugfs_netdev_exit(void)
358{
359 unregister_netdevice_notifier(&mac80211_debugfs_netdev_notifier);
360 notif_registered = 0;
361} 421}
diff --git a/net/mac80211/debugfs_netdev.h b/net/mac80211/debugfs_netdev.h
index 7af731f0b73..79025e79f4d 100644
--- a/net/mac80211/debugfs_netdev.h
+++ b/net/mac80211/debugfs_netdev.h
@@ -6,8 +6,7 @@
6#ifdef CONFIG_MAC80211_DEBUGFS 6#ifdef CONFIG_MAC80211_DEBUGFS
7void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata); 7void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata);
8void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata); 8void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata);
9void ieee80211_debugfs_netdev_init(void); 9void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata);
10void ieee80211_debugfs_netdev_exit(void);
11#else 10#else
12static inline void ieee80211_debugfs_add_netdev( 11static inline void ieee80211_debugfs_add_netdev(
13 struct ieee80211_sub_if_data *sdata) 12 struct ieee80211_sub_if_data *sdata)
@@ -15,10 +14,8 @@ static inline void ieee80211_debugfs_add_netdev(
15static inline void ieee80211_debugfs_remove_netdev( 14static inline void ieee80211_debugfs_remove_netdev(
16 struct ieee80211_sub_if_data *sdata) 15 struct ieee80211_sub_if_data *sdata)
17{} 16{}
18static inline void ieee80211_debugfs_netdev_init(void) 17static inline void ieee80211_debugfs_rename_netdev(
19{} 18 struct ieee80211_sub_if_data *sdata)
20
21static inline void ieee80211_debugfs_netdev_exit(void)
22{} 19{}
23#endif 20#endif
24 21
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 3f41608c808..d92800bb2d2 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -44,7 +44,7 @@ static const struct file_operations sta_ ##name## _ops = { \
44 STA_OPS(name) 44 STA_OPS(name)
45 45
46STA_FILE(aid, sta.aid, D); 46STA_FILE(aid, sta.aid, D);
47STA_FILE(dev, sdata->dev->name, S); 47STA_FILE(dev, sdata->name, S);
48STA_FILE(rx_packets, rx_packets, LU); 48STA_FILE(rx_packets, rx_packets, LU);
49STA_FILE(tx_packets, tx_packets, LU); 49STA_FILE(tx_packets, tx_packets, LU);
50STA_FILE(rx_bytes, rx_bytes, LU); 50STA_FILE(rx_bytes, rx_bytes, LU);
@@ -120,36 +120,38 @@ STA_OPS(last_seq_ctrl);
120static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, 120static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
121 size_t count, loff_t *ppos) 121 size_t count, loff_t *ppos)
122{ 122{
123 char buf[30 + STA_TID_NUM * 70], *p = buf; 123 char buf[64 + STA_TID_NUM * 40], *p = buf;
124 int i; 124 int i;
125 struct sta_info *sta = file->private_data; 125 struct sta_info *sta = file->private_data;
126 126
127 spin_lock_bh(&sta->lock); 127 spin_lock_bh(&sta->lock);
128 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n", 128 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
129 sta->ampdu_mlme.dialog_token_allocator + 1); 129 sta->ampdu_mlme.dialog_token_allocator + 1);
130 p += scnprintf(p, sizeof(buf) + buf - p,
131 "TID\t\tRX\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n");
130 for (i = 0; i < STA_TID_NUM; i++) { 132 for (i = 0; i < STA_TID_NUM; i++) {
131 p += scnprintf(p, sizeof(buf)+buf-p, "TID %02d:", i); 133 p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i);
132 p += scnprintf(p, sizeof(buf)+buf-p, " RX=%x", 134 p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
133 sta->ampdu_mlme.tid_state_rx[i]); 135 sta->ampdu_mlme.tid_state_rx[i]);
134 p += scnprintf(p, sizeof(buf)+buf-p, "/DTKN=%#.2x", 136 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
135 sta->ampdu_mlme.tid_state_rx[i] ? 137 sta->ampdu_mlme.tid_state_rx[i] ?
136 sta->ampdu_mlme.tid_rx[i]->dialog_token : 0); 138 sta->ampdu_mlme.tid_rx[i]->dialog_token : 0);
137 p += scnprintf(p, sizeof(buf)+buf-p, "/SSN=%#.3x", 139 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
138 sta->ampdu_mlme.tid_state_rx[i] ? 140 sta->ampdu_mlme.tid_state_rx[i] ?
139 sta->ampdu_mlme.tid_rx[i]->ssn : 0); 141 sta->ampdu_mlme.tid_rx[i]->ssn : 0);
140 142
141 p += scnprintf(p, sizeof(buf)+buf-p, " TX=%x", 143 p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
142 sta->ampdu_mlme.tid_state_tx[i]); 144 sta->ampdu_mlme.tid_state_tx[i]);
143 p += scnprintf(p, sizeof(buf)+buf-p, "/DTKN=%#.2x", 145 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
144 sta->ampdu_mlme.tid_state_tx[i] ? 146 sta->ampdu_mlme.tid_state_tx[i] ?
145 sta->ampdu_mlme.tid_tx[i]->dialog_token : 0); 147 sta->ampdu_mlme.tid_tx[i]->dialog_token : 0);
146 p += scnprintf(p, sizeof(buf)+buf-p, "/SSN=%#.3x", 148 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
147 sta->ampdu_mlme.tid_state_tx[i] ? 149 sta->ampdu_mlme.tid_state_tx[i] ?
148 sta->ampdu_mlme.tid_tx[i]->ssn : 0); 150 sta->ampdu_mlme.tid_tx[i]->ssn : 0);
149 p += scnprintf(p, sizeof(buf)+buf-p, "/pending=%03d", 151 p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d",
150 sta->ampdu_mlme.tid_state_tx[i] ? 152 sta->ampdu_mlme.tid_state_tx[i] ?
151 skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0); 153 skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0);
152 p += scnprintf(p, sizeof(buf)+buf-p, "\n"); 154 p += scnprintf(p, sizeof(buf) + buf - p, "\n");
153 } 155 }
154 spin_unlock_bh(&sta->lock); 156 spin_unlock_bh(&sta->lock);
155 157
@@ -160,7 +162,12 @@ STA_OPS(agg_status);
160static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf, 162static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
161 size_t count, loff_t *ppos) 163 size_t count, loff_t *ppos)
162{ 164{
163 char buf[200], *p = buf; 165#define PRINT_HT_CAP(_cond, _str) \
166 do { \
167 if (_cond) \
168 p += scnprintf(p, sizeof(buf)+buf-p, "\t" _str "\n"); \
169 } while (0)
170 char buf[512], *p = buf;
164 int i; 171 int i;
165 struct sta_info *sta = file->private_data; 172 struct sta_info *sta = file->private_data;
166 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap; 173 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
@@ -168,15 +175,64 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
168 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n", 175 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
169 htc->ht_supported ? "" : "not "); 176 htc->ht_supported ? "" : "not ");
170 if (htc->ht_supported) { 177 if (htc->ht_supported) {
171 p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.2x\n", htc->cap); 178 p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap);
179
180 PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDCP");
181 PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40");
182 PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20");
183
184 PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 0, "Static SM Power Save");
185 PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 1, "Dynamic SM Power Save");
186 PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 3, "SM Power Save disabled");
187
188 PRINT_HT_CAP((htc->cap & BIT(4)), "RX Greenfield");
189 PRINT_HT_CAP((htc->cap & BIT(5)), "RX HT20 SGI");
190 PRINT_HT_CAP((htc->cap & BIT(6)), "RX HT40 SGI");
191 PRINT_HT_CAP((htc->cap & BIT(7)), "TX STBC");
192
193 PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 0, "No RX STBC");
194 PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 1, "RX STBC 1-stream");
195 PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 2, "RX STBC 2-streams");
196 PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 3, "RX STBC 3-streams");
197
198 PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack");
199
200 PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
201 "3839 bytes");
202 PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: "
203 "7935 bytes");
204
205 /*
206 * For beacons and probe response this would mean the BSS
207 * does or does not allow the usage of DSSS/CCK HT40.
208 * Otherwise it means the STA does or does not use
209 * DSSS/CCK HT40.
210 */
211 PRINT_HT_CAP((htc->cap & BIT(12)), "DSSS/CCK HT40");
212 PRINT_HT_CAP(!(htc->cap & BIT(12)), "No DSSS/CCK HT40");
213
214 /* BIT(13) is reserved */
215
216 PRINT_HT_CAP((htc->cap & BIT(14)), "40 MHz Intolerant");
217
218 PRINT_HT_CAP((htc->cap & BIT(15)), "L-SIG TXOP protection");
219
172 p += scnprintf(p, sizeof(buf)+buf-p, "ampdu factor/density: %d/%d\n", 220 p += scnprintf(p, sizeof(buf)+buf-p, "ampdu factor/density: %d/%d\n",
173 htc->ampdu_factor, htc->ampdu_density); 221 htc->ampdu_factor, htc->ampdu_density);
174 p += scnprintf(p, sizeof(buf)+buf-p, "MCS mask:"); 222 p += scnprintf(p, sizeof(buf)+buf-p, "MCS mask:");
223
175 for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) 224 for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
176 p += scnprintf(p, sizeof(buf)+buf-p, " %.2x", 225 p += scnprintf(p, sizeof(buf)+buf-p, " %.2x",
177 htc->mcs.rx_mask[i]); 226 htc->mcs.rx_mask[i]);
178 p += scnprintf(p, sizeof(buf)+buf-p, "\nMCS rx highest: %d\n", 227 p += scnprintf(p, sizeof(buf)+buf-p, "\n");
179 le16_to_cpu(htc->mcs.rx_highest)); 228
229 /* If not set this is meaningless */
230 if (le16_to_cpu(htc->mcs.rx_highest)) {
231 p += scnprintf(p, sizeof(buf)+buf-p,
232 "MCS rx highest: %d Mbps\n",
233 le16_to_cpu(htc->mcs.rx_highest));
234 }
235
180 p += scnprintf(p, sizeof(buf)+buf-p, "MCS tx params: %x\n", 236 p += scnprintf(p, sizeof(buf)+buf-p, "MCS tx params: %x\n",
181 htc->mcs.tx_params); 237 htc->mcs.tx_params);
182 } 238 }
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 921dd9c9ff6..c3d844093a2 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -14,6 +14,8 @@ static inline int drv_start(struct ieee80211_local *local)
14{ 14{
15 int ret; 15 int ret;
16 16
17 might_sleep();
18
17 local->started = true; 19 local->started = true;
18 smp_mb(); 20 smp_mb();
19 ret = local->ops->start(&local->hw); 21 ret = local->ops->start(&local->hw);
@@ -23,6 +25,8 @@ static inline int drv_start(struct ieee80211_local *local)
23 25
24static inline void drv_stop(struct ieee80211_local *local) 26static inline void drv_stop(struct ieee80211_local *local)
25{ 27{
28 might_sleep();
29
26 local->ops->stop(&local->hw); 30 local->ops->stop(&local->hw);
27 trace_drv_stop(local); 31 trace_drv_stop(local);
28 32
@@ -36,35 +40,47 @@ static inline void drv_stop(struct ieee80211_local *local)
36} 40}
37 41
38static inline int drv_add_interface(struct ieee80211_local *local, 42static inline int drv_add_interface(struct ieee80211_local *local,
39 struct ieee80211_if_init_conf *conf) 43 struct ieee80211_vif *vif)
40{ 44{
41 int ret = local->ops->add_interface(&local->hw, conf); 45 int ret;
42 trace_drv_add_interface(local, conf->mac_addr, conf->vif, ret); 46
47 might_sleep();
48
49 ret = local->ops->add_interface(&local->hw, vif);
50 trace_drv_add_interface(local, vif_to_sdata(vif), ret);
43 return ret; 51 return ret;
44} 52}
45 53
46static inline void drv_remove_interface(struct ieee80211_local *local, 54static inline void drv_remove_interface(struct ieee80211_local *local,
47 struct ieee80211_if_init_conf *conf) 55 struct ieee80211_vif *vif)
48{ 56{
49 local->ops->remove_interface(&local->hw, conf); 57 might_sleep();
50 trace_drv_remove_interface(local, conf->mac_addr, conf->vif); 58
59 local->ops->remove_interface(&local->hw, vif);
60 trace_drv_remove_interface(local, vif_to_sdata(vif));
51} 61}
52 62
53static inline int drv_config(struct ieee80211_local *local, u32 changed) 63static inline int drv_config(struct ieee80211_local *local, u32 changed)
54{ 64{
55 int ret = local->ops->config(&local->hw, changed); 65 int ret;
66
67 might_sleep();
68
69 ret = local->ops->config(&local->hw, changed);
56 trace_drv_config(local, changed, ret); 70 trace_drv_config(local, changed, ret);
57 return ret; 71 return ret;
58} 72}
59 73
60static inline void drv_bss_info_changed(struct ieee80211_local *local, 74static inline void drv_bss_info_changed(struct ieee80211_local *local,
61 struct ieee80211_vif *vif, 75 struct ieee80211_sub_if_data *sdata,
62 struct ieee80211_bss_conf *info, 76 struct ieee80211_bss_conf *info,
63 u32 changed) 77 u32 changed)
64{ 78{
79 might_sleep();
80
65 if (local->ops->bss_info_changed) 81 if (local->ops->bss_info_changed)
66 local->ops->bss_info_changed(&local->hw, vif, info, changed); 82 local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed);
67 trace_drv_bss_info_changed(local, vif, info, changed); 83 trace_drv_bss_info_changed(local, sdata, info, changed);
68} 84}
69 85
70static inline u64 drv_prepare_multicast(struct ieee80211_local *local, 86static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
@@ -106,36 +122,53 @@ static inline int drv_set_tim(struct ieee80211_local *local,
106} 122}
107 123
108static inline int drv_set_key(struct ieee80211_local *local, 124static inline int drv_set_key(struct ieee80211_local *local,
109 enum set_key_cmd cmd, struct ieee80211_vif *vif, 125 enum set_key_cmd cmd,
126 struct ieee80211_sub_if_data *sdata,
110 struct ieee80211_sta *sta, 127 struct ieee80211_sta *sta,
111 struct ieee80211_key_conf *key) 128 struct ieee80211_key_conf *key)
112{ 129{
113 int ret = local->ops->set_key(&local->hw, cmd, vif, sta, key); 130 int ret;
114 trace_drv_set_key(local, cmd, vif, sta, key, ret); 131
132 might_sleep();
133
134 ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key);
135 trace_drv_set_key(local, cmd, sdata, sta, key, ret);
115 return ret; 136 return ret;
116} 137}
117 138
118static inline void drv_update_tkip_key(struct ieee80211_local *local, 139static inline void drv_update_tkip_key(struct ieee80211_local *local,
140 struct ieee80211_sub_if_data *sdata,
119 struct ieee80211_key_conf *conf, 141 struct ieee80211_key_conf *conf,
120 const u8 *address, u32 iv32, 142 struct sta_info *sta, u32 iv32,
121 u16 *phase1key) 143 u16 *phase1key)
122{ 144{
145 struct ieee80211_sta *ista = NULL;
146
147 if (sta)
148 ista = &sta->sta;
149
123 if (local->ops->update_tkip_key) 150 if (local->ops->update_tkip_key)
124 local->ops->update_tkip_key(&local->hw, conf, address, 151 local->ops->update_tkip_key(&local->hw, &sdata->vif, conf,
125 iv32, phase1key); 152 ista, iv32, phase1key);
126 trace_drv_update_tkip_key(local, conf, address, iv32); 153 trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
127} 154}
128 155
129static inline int drv_hw_scan(struct ieee80211_local *local, 156static inline int drv_hw_scan(struct ieee80211_local *local,
130 struct cfg80211_scan_request *req) 157 struct cfg80211_scan_request *req)
131{ 158{
132 int ret = local->ops->hw_scan(&local->hw, req); 159 int ret;
160
161 might_sleep();
162
163 ret = local->ops->hw_scan(&local->hw, req);
133 trace_drv_hw_scan(local, req, ret); 164 trace_drv_hw_scan(local, req, ret);
134 return ret; 165 return ret;
135} 166}
136 167
137static inline void drv_sw_scan_start(struct ieee80211_local *local) 168static inline void drv_sw_scan_start(struct ieee80211_local *local)
138{ 169{
170 might_sleep();
171
139 if (local->ops->sw_scan_start) 172 if (local->ops->sw_scan_start)
140 local->ops->sw_scan_start(&local->hw); 173 local->ops->sw_scan_start(&local->hw);
141 trace_drv_sw_scan_start(local); 174 trace_drv_sw_scan_start(local);
@@ -143,6 +176,8 @@ static inline void drv_sw_scan_start(struct ieee80211_local *local)
143 176
144static inline void drv_sw_scan_complete(struct ieee80211_local *local) 177static inline void drv_sw_scan_complete(struct ieee80211_local *local)
145{ 178{
179 might_sleep();
180
146 if (local->ops->sw_scan_complete) 181 if (local->ops->sw_scan_complete)
147 local->ops->sw_scan_complete(&local->hw); 182 local->ops->sw_scan_complete(&local->hw);
148 trace_drv_sw_scan_complete(local); 183 trace_drv_sw_scan_complete(local);
@@ -153,6 +188,8 @@ static inline int drv_get_stats(struct ieee80211_local *local,
153{ 188{
154 int ret = -EOPNOTSUPP; 189 int ret = -EOPNOTSUPP;
155 190
191 might_sleep();
192
156 if (local->ops->get_stats) 193 if (local->ops->get_stats)
157 ret = local->ops->get_stats(&local->hw, stats); 194 ret = local->ops->get_stats(&local->hw, stats);
158 trace_drv_get_stats(local, stats, ret); 195 trace_drv_get_stats(local, stats, ret);
@@ -172,43 +209,93 @@ static inline int drv_set_rts_threshold(struct ieee80211_local *local,
172 u32 value) 209 u32 value)
173{ 210{
174 int ret = 0; 211 int ret = 0;
212
213 might_sleep();
214
175 if (local->ops->set_rts_threshold) 215 if (local->ops->set_rts_threshold)
176 ret = local->ops->set_rts_threshold(&local->hw, value); 216 ret = local->ops->set_rts_threshold(&local->hw, value);
177 trace_drv_set_rts_threshold(local, value, ret); 217 trace_drv_set_rts_threshold(local, value, ret);
178 return ret; 218 return ret;
179} 219}
180 220
221static inline int drv_set_coverage_class(struct ieee80211_local *local,
222 u8 value)
223{
224 int ret = 0;
225 might_sleep();
226
227 if (local->ops->set_coverage_class)
228 local->ops->set_coverage_class(&local->hw, value);
229 else
230 ret = -EOPNOTSUPP;
231
232 trace_drv_set_coverage_class(local, value, ret);
233 return ret;
234}
235
181static inline void drv_sta_notify(struct ieee80211_local *local, 236static inline void drv_sta_notify(struct ieee80211_local *local,
182 struct ieee80211_vif *vif, 237 struct ieee80211_sub_if_data *sdata,
183 enum sta_notify_cmd cmd, 238 enum sta_notify_cmd cmd,
184 struct ieee80211_sta *sta) 239 struct ieee80211_sta *sta)
185{ 240{
186 if (local->ops->sta_notify) 241 if (local->ops->sta_notify)
187 local->ops->sta_notify(&local->hw, vif, cmd, sta); 242 local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta);
188 trace_drv_sta_notify(local, vif, cmd, sta); 243 trace_drv_sta_notify(local, sdata, cmd, sta);
244}
245
246static inline int drv_sta_add(struct ieee80211_local *local,
247 struct ieee80211_sub_if_data *sdata,
248 struct ieee80211_sta *sta)
249{
250 int ret = 0;
251
252 might_sleep();
253
254 if (local->ops->sta_add)
255 ret = local->ops->sta_add(&local->hw, &sdata->vif, sta);
256 else if (local->ops->sta_notify)
257 local->ops->sta_notify(&local->hw, &sdata->vif,
258 STA_NOTIFY_ADD, sta);
259
260 trace_drv_sta_add(local, sdata, sta, ret);
261
262 return ret;
263}
264
265static inline void drv_sta_remove(struct ieee80211_local *local,
266 struct ieee80211_sub_if_data *sdata,
267 struct ieee80211_sta *sta)
268{
269 might_sleep();
270
271 if (local->ops->sta_remove)
272 local->ops->sta_remove(&local->hw, &sdata->vif, sta);
273 else if (local->ops->sta_notify)
274 local->ops->sta_notify(&local->hw, &sdata->vif,
275 STA_NOTIFY_REMOVE, sta);
276
277 trace_drv_sta_remove(local, sdata, sta);
189} 278}
190 279
191static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue, 280static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
192 const struct ieee80211_tx_queue_params *params) 281 const struct ieee80211_tx_queue_params *params)
193{ 282{
194 int ret = -EOPNOTSUPP; 283 int ret = -EOPNOTSUPP;
284
285 might_sleep();
286
195 if (local->ops->conf_tx) 287 if (local->ops->conf_tx)
196 ret = local->ops->conf_tx(&local->hw, queue, params); 288 ret = local->ops->conf_tx(&local->hw, queue, params);
197 trace_drv_conf_tx(local, queue, params, ret); 289 trace_drv_conf_tx(local, queue, params, ret);
198 return ret; 290 return ret;
199} 291}
200 292
201static inline int drv_get_tx_stats(struct ieee80211_local *local,
202 struct ieee80211_tx_queue_stats *stats)
203{
204 int ret = local->ops->get_tx_stats(&local->hw, stats);
205 trace_drv_get_tx_stats(local, stats, ret);
206 return ret;
207}
208
209static inline u64 drv_get_tsf(struct ieee80211_local *local) 293static inline u64 drv_get_tsf(struct ieee80211_local *local)
210{ 294{
211 u64 ret = -1ULL; 295 u64 ret = -1ULL;
296
297 might_sleep();
298
212 if (local->ops->get_tsf) 299 if (local->ops->get_tsf)
213 ret = local->ops->get_tsf(&local->hw); 300 ret = local->ops->get_tsf(&local->hw);
214 trace_drv_get_tsf(local, ret); 301 trace_drv_get_tsf(local, ret);
@@ -217,6 +304,8 @@ static inline u64 drv_get_tsf(struct ieee80211_local *local)
217 304
218static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf) 305static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
219{ 306{
307 might_sleep();
308
220 if (local->ops->set_tsf) 309 if (local->ops->set_tsf)
221 local->ops->set_tsf(&local->hw, tsf); 310 local->ops->set_tsf(&local->hw, tsf);
222 trace_drv_set_tsf(local, tsf); 311 trace_drv_set_tsf(local, tsf);
@@ -224,6 +313,8 @@ static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
224 313
225static inline void drv_reset_tsf(struct ieee80211_local *local) 314static inline void drv_reset_tsf(struct ieee80211_local *local)
226{ 315{
316 might_sleep();
317
227 if (local->ops->reset_tsf) 318 if (local->ops->reset_tsf)
228 local->ops->reset_tsf(&local->hw); 319 local->ops->reset_tsf(&local->hw);
229 trace_drv_reset_tsf(local); 320 trace_drv_reset_tsf(local);
@@ -232,6 +323,9 @@ static inline void drv_reset_tsf(struct ieee80211_local *local)
232static inline int drv_tx_last_beacon(struct ieee80211_local *local) 323static inline int drv_tx_last_beacon(struct ieee80211_local *local)
233{ 324{
234 int ret = 1; 325 int ret = 1;
326
327 might_sleep();
328
235 if (local->ops->tx_last_beacon) 329 if (local->ops->tx_last_beacon)
236 ret = local->ops->tx_last_beacon(&local->hw); 330 ret = local->ops->tx_last_beacon(&local->hw);
237 trace_drv_tx_last_beacon(local, ret); 331 trace_drv_tx_last_beacon(local, ret);
@@ -239,23 +333,34 @@ static inline int drv_tx_last_beacon(struct ieee80211_local *local)
239} 333}
240 334
241static inline int drv_ampdu_action(struct ieee80211_local *local, 335static inline int drv_ampdu_action(struct ieee80211_local *local,
242 struct ieee80211_vif *vif, 336 struct ieee80211_sub_if_data *sdata,
243 enum ieee80211_ampdu_mlme_action action, 337 enum ieee80211_ampdu_mlme_action action,
244 struct ieee80211_sta *sta, u16 tid, 338 struct ieee80211_sta *sta, u16 tid,
245 u16 *ssn) 339 u16 *ssn)
246{ 340{
247 int ret = -EOPNOTSUPP; 341 int ret = -EOPNOTSUPP;
248 if (local->ops->ampdu_action) 342 if (local->ops->ampdu_action)
249 ret = local->ops->ampdu_action(&local->hw, vif, action, 343 ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
250 sta, tid, ssn); 344 sta, tid, ssn);
251 trace_drv_ampdu_action(local, vif, action, sta, tid, ssn, ret); 345 trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, ret);
252 return ret; 346 return ret;
253} 347}
254 348
255 349
256static inline void drv_rfkill_poll(struct ieee80211_local *local) 350static inline void drv_rfkill_poll(struct ieee80211_local *local)
257{ 351{
352 might_sleep();
353
258 if (local->ops->rfkill_poll) 354 if (local->ops->rfkill_poll)
259 local->ops->rfkill_poll(&local->hw); 355 local->ops->rfkill_poll(&local->hw);
260} 356}
357
358static inline void drv_flush(struct ieee80211_local *local, bool drop)
359{
360 might_sleep();
361
362 trace_drv_flush(local, drop);
363 if (local->ops->flush)
364 local->ops->flush(&local->hw, drop);
365}
261#endif /* __MAC80211_DRIVER_OPS */ 366#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index da8497ef706..41baf730a5c 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -25,10 +25,12 @@ static inline void trace_ ## name(proto) {}
25#define STA_PR_FMT " sta:%pM" 25#define STA_PR_FMT " sta:%pM"
26#define STA_PR_ARG __entry->sta_addr 26#define STA_PR_ARG __entry->sta_addr
27 27
28#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, vif) 28#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, sdata) \
29#define VIF_ASSIGN __entry->vif_type = vif ? vif->type : 0; __entry->vif = vif 29 __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
30#define VIF_PR_FMT " vif:%p(%d)" 30#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \
31#define VIF_PR_ARG __entry->vif, __entry->vif_type 31 __assign_str(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
32#define VIF_PR_FMT " vif:%s(%d)"
33#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type
32 34
33TRACE_EVENT(drv_start, 35TRACE_EVENT(drv_start,
34 TP_PROTO(struct ieee80211_local *local, int ret), 36 TP_PROTO(struct ieee80211_local *local, int ret),
@@ -70,11 +72,10 @@ TRACE_EVENT(drv_stop,
70 72
71TRACE_EVENT(drv_add_interface, 73TRACE_EVENT(drv_add_interface,
72 TP_PROTO(struct ieee80211_local *local, 74 TP_PROTO(struct ieee80211_local *local,
73 const u8 *addr, 75 struct ieee80211_sub_if_data *sdata,
74 struct ieee80211_vif *vif,
75 int ret), 76 int ret),
76 77
77 TP_ARGS(local, addr, vif, ret), 78 TP_ARGS(local, sdata, ret),
78 79
79 TP_STRUCT__entry( 80 TP_STRUCT__entry(
80 LOCAL_ENTRY 81 LOCAL_ENTRY
@@ -86,7 +87,7 @@ TRACE_EVENT(drv_add_interface,
86 TP_fast_assign( 87 TP_fast_assign(
87 LOCAL_ASSIGN; 88 LOCAL_ASSIGN;
88 VIF_ASSIGN; 89 VIF_ASSIGN;
89 memcpy(__entry->addr, addr, 6); 90 memcpy(__entry->addr, sdata->vif.addr, 6);
90 __entry->ret = ret; 91 __entry->ret = ret;
91 ), 92 ),
92 93
@@ -97,10 +98,9 @@ TRACE_EVENT(drv_add_interface,
97); 98);
98 99
99TRACE_EVENT(drv_remove_interface, 100TRACE_EVENT(drv_remove_interface,
100 TP_PROTO(struct ieee80211_local *local, 101 TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata),
101 const u8 *addr, struct ieee80211_vif *vif),
102 102
103 TP_ARGS(local, addr, vif), 103 TP_ARGS(local, sdata),
104 104
105 TP_STRUCT__entry( 105 TP_STRUCT__entry(
106 LOCAL_ENTRY 106 LOCAL_ENTRY
@@ -111,7 +111,7 @@ TRACE_EVENT(drv_remove_interface,
111 TP_fast_assign( 111 TP_fast_assign(
112 LOCAL_ASSIGN; 112 LOCAL_ASSIGN;
113 VIF_ASSIGN; 113 VIF_ASSIGN;
114 memcpy(__entry->addr, addr, 6); 114 memcpy(__entry->addr, sdata->vif.addr, 6);
115 ), 115 ),
116 116
117 TP_printk( 117 TP_printk(
@@ -140,6 +140,7 @@ TRACE_EVENT(drv_config,
140 __field(u8, short_frame_max_tx_count) 140 __field(u8, short_frame_max_tx_count)
141 __field(int, center_freq) 141 __field(int, center_freq)
142 __field(int, channel_type) 142 __field(int, channel_type)
143 __field(int, smps)
143 ), 144 ),
144 145
145 TP_fast_assign( 146 TP_fast_assign(
@@ -155,6 +156,7 @@ TRACE_EVENT(drv_config,
155 __entry->short_frame_max_tx_count = local->hw.conf.short_frame_max_tx_count; 156 __entry->short_frame_max_tx_count = local->hw.conf.short_frame_max_tx_count;
156 __entry->center_freq = local->hw.conf.channel->center_freq; 157 __entry->center_freq = local->hw.conf.channel->center_freq;
157 __entry->channel_type = local->hw.conf.channel_type; 158 __entry->channel_type = local->hw.conf.channel_type;
159 __entry->smps = local->hw.conf.smps_mode;
158 ), 160 ),
159 161
160 TP_printk( 162 TP_printk(
@@ -165,11 +167,11 @@ TRACE_EVENT(drv_config,
165 167
166TRACE_EVENT(drv_bss_info_changed, 168TRACE_EVENT(drv_bss_info_changed,
167 TP_PROTO(struct ieee80211_local *local, 169 TP_PROTO(struct ieee80211_local *local,
168 struct ieee80211_vif *vif, 170 struct ieee80211_sub_if_data *sdata,
169 struct ieee80211_bss_conf *info, 171 struct ieee80211_bss_conf *info,
170 u32 changed), 172 u32 changed),
171 173
172 TP_ARGS(local, vif, info, changed), 174 TP_ARGS(local, sdata, info, changed),
173 175
174 TP_STRUCT__entry( 176 TP_STRUCT__entry(
175 LOCAL_ENTRY 177 LOCAL_ENTRY
@@ -293,11 +295,11 @@ TRACE_EVENT(drv_set_tim,
293 295
294TRACE_EVENT(drv_set_key, 296TRACE_EVENT(drv_set_key,
295 TP_PROTO(struct ieee80211_local *local, 297 TP_PROTO(struct ieee80211_local *local,
296 enum set_key_cmd cmd, struct ieee80211_vif *vif, 298 enum set_key_cmd cmd, struct ieee80211_sub_if_data *sdata,
297 struct ieee80211_sta *sta, 299 struct ieee80211_sta *sta,
298 struct ieee80211_key_conf *key, int ret), 300 struct ieee80211_key_conf *key, int ret),
299 301
300 TP_ARGS(local, cmd, vif, sta, key, ret), 302 TP_ARGS(local, cmd, sdata, sta, key, ret),
301 303
302 TP_STRUCT__entry( 304 TP_STRUCT__entry(
303 LOCAL_ENTRY 305 LOCAL_ENTRY
@@ -329,26 +331,29 @@ TRACE_EVENT(drv_set_key,
329 331
330TRACE_EVENT(drv_update_tkip_key, 332TRACE_EVENT(drv_update_tkip_key,
331 TP_PROTO(struct ieee80211_local *local, 333 TP_PROTO(struct ieee80211_local *local,
334 struct ieee80211_sub_if_data *sdata,
332 struct ieee80211_key_conf *conf, 335 struct ieee80211_key_conf *conf,
333 const u8 *address, u32 iv32), 336 struct ieee80211_sta *sta, u32 iv32),
334 337
335 TP_ARGS(local, conf, address, iv32), 338 TP_ARGS(local, sdata, conf, sta, iv32),
336 339
337 TP_STRUCT__entry( 340 TP_STRUCT__entry(
338 LOCAL_ENTRY 341 LOCAL_ENTRY
339 __array(u8, addr, 6) 342 VIF_ENTRY
343 STA_ENTRY
340 __field(u32, iv32) 344 __field(u32, iv32)
341 ), 345 ),
342 346
343 TP_fast_assign( 347 TP_fast_assign(
344 LOCAL_ASSIGN; 348 LOCAL_ASSIGN;
345 memcpy(__entry->addr, address, 6); 349 VIF_ASSIGN;
350 STA_ASSIGN;
346 __entry->iv32 = iv32; 351 __entry->iv32 = iv32;
347 ), 352 ),
348 353
349 TP_printk( 354 TP_printk(
350 LOCAL_PR_FMT " addr:%pM iv32:%#x", 355 LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " iv32:%#x",
351 LOCAL_PR_ARG, __entry->addr, __entry->iv32 356 LOCAL_PR_ARG,VIF_PR_ARG,STA_PR_ARG, __entry->iv32
352 ) 357 )
353); 358);
354 359
@@ -489,13 +494,36 @@ TRACE_EVENT(drv_set_rts_threshold,
489 ) 494 )
490); 495);
491 496
497TRACE_EVENT(drv_set_coverage_class,
498 TP_PROTO(struct ieee80211_local *local, u8 value, int ret),
499
500 TP_ARGS(local, value, ret),
501
502 TP_STRUCT__entry(
503 LOCAL_ENTRY
504 __field(u8, value)
505 __field(int, ret)
506 ),
507
508 TP_fast_assign(
509 LOCAL_ASSIGN;
510 __entry->ret = ret;
511 __entry->value = value;
512 ),
513
514 TP_printk(
515 LOCAL_PR_FMT " value:%d ret:%d",
516 LOCAL_PR_ARG, __entry->value, __entry->ret
517 )
518);
519
492TRACE_EVENT(drv_sta_notify, 520TRACE_EVENT(drv_sta_notify,
493 TP_PROTO(struct ieee80211_local *local, 521 TP_PROTO(struct ieee80211_local *local,
494 struct ieee80211_vif *vif, 522 struct ieee80211_sub_if_data *sdata,
495 enum sta_notify_cmd cmd, 523 enum sta_notify_cmd cmd,
496 struct ieee80211_sta *sta), 524 struct ieee80211_sta *sta),
497 525
498 TP_ARGS(local, vif, cmd, sta), 526 TP_ARGS(local, sdata, cmd, sta),
499 527
500 TP_STRUCT__entry( 528 TP_STRUCT__entry(
501 LOCAL_ENTRY 529 LOCAL_ENTRY
@@ -517,59 +545,88 @@ TRACE_EVENT(drv_sta_notify,
517 ) 545 )
518); 546);
519 547
520TRACE_EVENT(drv_conf_tx, 548TRACE_EVENT(drv_sta_add,
521 TP_PROTO(struct ieee80211_local *local, u16 queue, 549 TP_PROTO(struct ieee80211_local *local,
522 const struct ieee80211_tx_queue_params *params, 550 struct ieee80211_sub_if_data *sdata,
523 int ret), 551 struct ieee80211_sta *sta, int ret),
524 552
525 TP_ARGS(local, queue, params, ret), 553 TP_ARGS(local, sdata, sta, ret),
526 554
527 TP_STRUCT__entry( 555 TP_STRUCT__entry(
528 LOCAL_ENTRY 556 LOCAL_ENTRY
529 __field(u16, queue) 557 VIF_ENTRY
530 __field(u16, txop) 558 STA_ENTRY
531 __field(u16, cw_min)
532 __field(u16, cw_max)
533 __field(u8, aifs)
534 __field(int, ret) 559 __field(int, ret)
535 ), 560 ),
536 561
537 TP_fast_assign( 562 TP_fast_assign(
538 LOCAL_ASSIGN; 563 LOCAL_ASSIGN;
539 __entry->queue = queue; 564 VIF_ASSIGN;
565 STA_ASSIGN;
540 __entry->ret = ret; 566 __entry->ret = ret;
541 __entry->txop = params->txop;
542 __entry->cw_max = params->cw_max;
543 __entry->cw_min = params->cw_min;
544 __entry->aifs = params->aifs;
545 ), 567 ),
546 568
547 TP_printk( 569 TP_printk(
548 LOCAL_PR_FMT " queue:%d ret:%d", 570 LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ret:%d",
549 LOCAL_PR_ARG, __entry->queue, __entry->ret 571 LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret
550 ) 572 )
551); 573);
552 574
553TRACE_EVENT(drv_get_tx_stats, 575TRACE_EVENT(drv_sta_remove,
554 TP_PROTO(struct ieee80211_local *local, 576 TP_PROTO(struct ieee80211_local *local,
555 struct ieee80211_tx_queue_stats *stats, 577 struct ieee80211_sub_if_data *sdata,
578 struct ieee80211_sta *sta),
579
580 TP_ARGS(local, sdata, sta),
581
582 TP_STRUCT__entry(
583 LOCAL_ENTRY
584 VIF_ENTRY
585 STA_ENTRY
586 ),
587
588 TP_fast_assign(
589 LOCAL_ASSIGN;
590 VIF_ASSIGN;
591 STA_ASSIGN;
592 ),
593
594 TP_printk(
595 LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT,
596 LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG
597 )
598);
599
600TRACE_EVENT(drv_conf_tx,
601 TP_PROTO(struct ieee80211_local *local, u16 queue,
602 const struct ieee80211_tx_queue_params *params,
556 int ret), 603 int ret),
557 604
558 TP_ARGS(local, stats, ret), 605 TP_ARGS(local, queue, params, ret),
559 606
560 TP_STRUCT__entry( 607 TP_STRUCT__entry(
561 LOCAL_ENTRY 608 LOCAL_ENTRY
609 __field(u16, queue)
610 __field(u16, txop)
611 __field(u16, cw_min)
612 __field(u16, cw_max)
613 __field(u8, aifs)
562 __field(int, ret) 614 __field(int, ret)
563 ), 615 ),
564 616
565 TP_fast_assign( 617 TP_fast_assign(
566 LOCAL_ASSIGN; 618 LOCAL_ASSIGN;
619 __entry->queue = queue;
567 __entry->ret = ret; 620 __entry->ret = ret;
621 __entry->txop = params->txop;
622 __entry->cw_max = params->cw_max;
623 __entry->cw_min = params->cw_min;
624 __entry->aifs = params->aifs;
568 ), 625 ),
569 626
570 TP_printk( 627 TP_printk(
571 LOCAL_PR_FMT " ret:%d", 628 LOCAL_PR_FMT " queue:%d ret:%d",
572 LOCAL_PR_ARG, __entry->ret 629 LOCAL_PR_ARG, __entry->queue, __entry->ret
573 ) 630 )
574); 631);
575 632
@@ -656,12 +713,12 @@ TRACE_EVENT(drv_tx_last_beacon,
656 713
657TRACE_EVENT(drv_ampdu_action, 714TRACE_EVENT(drv_ampdu_action,
658 TP_PROTO(struct ieee80211_local *local, 715 TP_PROTO(struct ieee80211_local *local,
659 struct ieee80211_vif *vif, 716 struct ieee80211_sub_if_data *sdata,
660 enum ieee80211_ampdu_mlme_action action, 717 enum ieee80211_ampdu_mlme_action action,
661 struct ieee80211_sta *sta, u16 tid, 718 struct ieee80211_sta *sta, u16 tid,
662 u16 *ssn, int ret), 719 u16 *ssn, int ret),
663 720
664 TP_ARGS(local, vif, action, sta, tid, ssn, ret), 721 TP_ARGS(local, sdata, action, sta, tid, ssn, ret),
665 722
666 TP_STRUCT__entry( 723 TP_STRUCT__entry(
667 LOCAL_ENTRY 724 LOCAL_ENTRY
@@ -688,6 +745,27 @@ TRACE_EVENT(drv_ampdu_action,
688 LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret 745 LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret
689 ) 746 )
690); 747);
748
749TRACE_EVENT(drv_flush,
750 TP_PROTO(struct ieee80211_local *local, bool drop),
751
752 TP_ARGS(local, drop),
753
754 TP_STRUCT__entry(
755 LOCAL_ENTRY
756 __field(bool, drop)
757 ),
758
759 TP_fast_assign(
760 LOCAL_ASSIGN;
761 __entry->drop = drop;
762 ),
763
764 TP_printk(
765 LOCAL_PR_FMT " drop:%d",
766 LOCAL_PR_ARG, __entry->drop
767 )
768);
691#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */ 769#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
692 770
693#undef TRACE_INCLUDE_PATH 771#undef TRACE_INCLUDE_PATH
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index d7dcee68072..bb677a73b7c 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -125,7 +125,7 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
125 125
126 if (!skb) { 126 if (!skb) {
127 printk(KERN_ERR "%s: failed to allocate buffer " 127 printk(KERN_ERR "%s: failed to allocate buffer "
128 "for delba frame\n", sdata->dev->name); 128 "for delba frame\n", sdata->name);
129 return; 129 return;
130 } 130 }
131 131
@@ -133,10 +133,10 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
133 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 133 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
134 memset(mgmt, 0, 24); 134 memset(mgmt, 0, 24);
135 memcpy(mgmt->da, da, ETH_ALEN); 135 memcpy(mgmt->da, da, ETH_ALEN);
136 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 136 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
137 if (sdata->vif.type == NL80211_IFTYPE_AP || 137 if (sdata->vif.type == NL80211_IFTYPE_AP ||
138 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 138 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
139 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); 139 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
140 else if (sdata->vif.type == NL80211_IFTYPE_STATION) 140 else if (sdata->vif.type == NL80211_IFTYPE_STATION)
141 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); 141 memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
142 142
@@ -185,3 +185,50 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
185 spin_unlock_bh(&sta->lock); 185 spin_unlock_bh(&sta->lock);
186 } 186 }
187} 187}
188
189int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
190 enum ieee80211_smps_mode smps, const u8 *da,
191 const u8 *bssid)
192{
193 struct ieee80211_local *local = sdata->local;
194 struct sk_buff *skb;
195 struct ieee80211_mgmt *action_frame;
196
197 /* 27 = header + category + action + smps mode */
198 skb = dev_alloc_skb(27 + local->hw.extra_tx_headroom);
199 if (!skb)
200 return -ENOMEM;
201
202 skb_reserve(skb, local->hw.extra_tx_headroom);
203 action_frame = (void *)skb_put(skb, 27);
204 memcpy(action_frame->da, da, ETH_ALEN);
205 memcpy(action_frame->sa, sdata->dev->dev_addr, ETH_ALEN);
206 memcpy(action_frame->bssid, bssid, ETH_ALEN);
207 action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
208 IEEE80211_STYPE_ACTION);
209 action_frame->u.action.category = WLAN_CATEGORY_HT;
210 action_frame->u.action.u.ht_smps.action = WLAN_HT_ACTION_SMPS;
211 switch (smps) {
212 case IEEE80211_SMPS_AUTOMATIC:
213 case IEEE80211_SMPS_NUM_MODES:
214 WARN_ON(1);
215 case IEEE80211_SMPS_OFF:
216 action_frame->u.action.u.ht_smps.smps_control =
217 WLAN_HT_SMPS_CONTROL_DISABLED;
218 break;
219 case IEEE80211_SMPS_STATIC:
220 action_frame->u.action.u.ht_smps.smps_control =
221 WLAN_HT_SMPS_CONTROL_STATIC;
222 break;
223 case IEEE80211_SMPS_DYNAMIC:
224 action_frame->u.action.u.ht_smps.smps_control =
225 WLAN_HT_SMPS_CONTROL_DYNAMIC;
226 break;
227 }
228
229 /* we'll do more on status of this frame */
230 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
231 ieee80211_tx_skb(sdata, skb);
232
233 return 0;
234}
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 22f0c2aa7a8..f3e94248674 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -117,7 +117,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
117 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 117 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
118 IEEE80211_STYPE_PROBE_RESP); 118 IEEE80211_STYPE_PROBE_RESP);
119 memset(mgmt->da, 0xff, ETH_ALEN); 119 memset(mgmt->da, 0xff, ETH_ALEN);
120 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 120 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
121 memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN); 121 memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN);
122 mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int); 122 mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int);
123 mgmt->u.beacon.timestamp = cpu_to_le64(tsf); 123 mgmt->u.beacon.timestamp = cpu_to_le64(tsf);
@@ -187,15 +187,17 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
187static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, 187static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
188 struct ieee80211_bss *bss) 188 struct ieee80211_bss *bss)
189{ 189{
190 struct cfg80211_bss *cbss =
191 container_of((void *)bss, struct cfg80211_bss, priv);
190 struct ieee80211_supported_band *sband; 192 struct ieee80211_supported_band *sband;
191 u32 basic_rates; 193 u32 basic_rates;
192 int i, j; 194 int i, j;
193 u16 beacon_int = bss->cbss.beacon_interval; 195 u16 beacon_int = cbss->beacon_interval;
194 196
195 if (beacon_int < 10) 197 if (beacon_int < 10)
196 beacon_int = 10; 198 beacon_int = 10;
197 199
198 sband = sdata->local->hw.wiphy->bands[bss->cbss.channel->band]; 200 sband = sdata->local->hw.wiphy->bands[cbss->channel->band];
199 201
200 basic_rates = 0; 202 basic_rates = 0;
201 203
@@ -212,12 +214,12 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
212 } 214 }
213 } 215 }
214 216
215 __ieee80211_sta_join_ibss(sdata, bss->cbss.bssid, 217 __ieee80211_sta_join_ibss(sdata, cbss->bssid,
216 beacon_int, 218 beacon_int,
217 bss->cbss.channel, 219 cbss->channel,
218 basic_rates, 220 basic_rates,
219 bss->cbss.capability, 221 cbss->capability,
220 bss->cbss.tsf); 222 cbss->tsf);
221} 223}
222 224
223static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, 225static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
@@ -229,6 +231,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
229{ 231{
230 struct ieee80211_local *local = sdata->local; 232 struct ieee80211_local *local = sdata->local;
231 int freq; 233 int freq;
234 struct cfg80211_bss *cbss;
232 struct ieee80211_bss *bss; 235 struct ieee80211_bss *bss;
233 struct sta_info *sta; 236 struct sta_info *sta;
234 struct ieee80211_channel *channel; 237 struct ieee80211_channel *channel;
@@ -252,7 +255,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
252 255
253 rcu_read_lock(); 256 rcu_read_lock();
254 257
255 sta = sta_info_get(local, mgmt->sa); 258 sta = sta_info_get(sdata, mgmt->sa);
256 if (sta) { 259 if (sta) {
257 u32 prev_rates; 260 u32 prev_rates;
258 261
@@ -266,16 +269,18 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
266 printk(KERN_DEBUG "%s: updated supp_rates set " 269 printk(KERN_DEBUG "%s: updated supp_rates set "
267 "for %pM based on beacon info (0x%llx | " 270 "for %pM based on beacon info (0x%llx | "
268 "0x%llx -> 0x%llx)\n", 271 "0x%llx -> 0x%llx)\n",
269 sdata->dev->name, 272 sdata->name,
270 sta->sta.addr, 273 sta->sta.addr,
271 (unsigned long long) prev_rates, 274 (unsigned long long) prev_rates,
272 (unsigned long long) supp_rates, 275 (unsigned long long) supp_rates,
273 (unsigned long long) sta->sta.supp_rates[band]); 276 (unsigned long long) sta->sta.supp_rates[band]);
274#endif 277#endif
275 } else 278 rcu_read_unlock();
276 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates); 279 } else {
277 280 rcu_read_unlock();
278 rcu_read_unlock(); 281 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
282 supp_rates, GFP_KERNEL);
283 }
279 } 284 }
280 285
281 bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems, 286 bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems,
@@ -283,25 +288,23 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
283 if (!bss) 288 if (!bss)
284 return; 289 return;
285 290
291 cbss = container_of((void *)bss, struct cfg80211_bss, priv);
292
286 /* was just updated in ieee80211_bss_info_update */ 293 /* was just updated in ieee80211_bss_info_update */
287 beacon_timestamp = bss->cbss.tsf; 294 beacon_timestamp = cbss->tsf;
288 295
289 /* check if we need to merge IBSS */ 296 /* check if we need to merge IBSS */
290 297
291 /* merge only on beacons (???) */
292 if (!beacon)
293 goto put_bss;
294
295 /* we use a fixed BSSID */ 298 /* we use a fixed BSSID */
296 if (sdata->u.ibss.bssid) 299 if (sdata->u.ibss.fixed_bssid)
297 goto put_bss; 300 goto put_bss;
298 301
299 /* not an IBSS */ 302 /* not an IBSS */
300 if (!(bss->cbss.capability & WLAN_CAPABILITY_IBSS)) 303 if (!(cbss->capability & WLAN_CAPABILITY_IBSS))
301 goto put_bss; 304 goto put_bss;
302 305
303 /* different channel */ 306 /* different channel */
304 if (bss->cbss.channel != local->oper_channel) 307 if (cbss->channel != local->oper_channel)
305 goto put_bss; 308 goto put_bss;
306 309
307 /* different SSID */ 310 /* different SSID */
@@ -311,7 +314,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
311 goto put_bss; 314 goto put_bss;
312 315
313 /* same BSSID */ 316 /* same BSSID */
314 if (memcmp(bss->cbss.bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0) 317 if (memcmp(cbss->bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0)
315 goto put_bss; 318 goto put_bss;
316 319
317 if (rx_status->flag & RX_FLAG_TSFT) { 320 if (rx_status->flag & RX_FLAG_TSFT) {
@@ -364,10 +367,11 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
364#ifdef CONFIG_MAC80211_IBSS_DEBUG 367#ifdef CONFIG_MAC80211_IBSS_DEBUG
365 printk(KERN_DEBUG "%s: beacon TSF higher than " 368 printk(KERN_DEBUG "%s: beacon TSF higher than "
366 "local TSF - IBSS merge with BSSID %pM\n", 369 "local TSF - IBSS merge with BSSID %pM\n",
367 sdata->dev->name, mgmt->bssid); 370 sdata->name, mgmt->bssid);
368#endif 371#endif
369 ieee80211_sta_join_ibss(sdata, bss); 372 ieee80211_sta_join_ibss(sdata, bss);
370 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates); 373 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
374 supp_rates, GFP_KERNEL);
371 } 375 }
372 376
373 put_bss: 377 put_bss:
@@ -380,7 +384,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
380 * must be callable in atomic context. 384 * must be callable in atomic context.
381 */ 385 */
382struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, 386struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
383 u8 *bssid,u8 *addr, u32 supp_rates) 387 u8 *bssid,u8 *addr, u32 supp_rates,
388 gfp_t gfp)
384{ 389{
385 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 390 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
386 struct ieee80211_local *local = sdata->local; 391 struct ieee80211_local *local = sdata->local;
@@ -394,7 +399,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
394 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { 399 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
395 if (net_ratelimit()) 400 if (net_ratelimit())
396 printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n", 401 printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n",
397 sdata->dev->name, addr); 402 sdata->name, addr);
398 return NULL; 403 return NULL;
399 } 404 }
400 405
@@ -406,10 +411,10 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
406 411
407#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 412#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
408 printk(KERN_DEBUG "%s: Adding new IBSS station %pM (dev=%s)\n", 413 printk(KERN_DEBUG "%s: Adding new IBSS station %pM (dev=%s)\n",
409 wiphy_name(local->hw.wiphy), addr, sdata->dev->name); 414 wiphy_name(local->hw.wiphy), addr, sdata->name);
410#endif 415#endif
411 416
412 sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); 417 sta = sta_info_alloc(sdata, addr, gfp);
413 if (!sta) 418 if (!sta)
414 return NULL; 419 return NULL;
415 420
@@ -421,9 +426,9 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
421 426
422 rate_control_rate_init(sta); 427 rate_control_rate_init(sta);
423 428
429 /* If it fails, maybe we raced another insertion? */
424 if (sta_info_insert(sta)) 430 if (sta_info_insert(sta))
425 return NULL; 431 return sta_info_get(sdata, addr);
426
427 return sta; 432 return sta;
428} 433}
429 434
@@ -449,6 +454,9 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
449 return active; 454 return active;
450} 455}
451 456
457/*
458 * This function is called with state == IEEE80211_IBSS_MLME_JOINED
459 */
452 460
453static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata) 461static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
454{ 462{
@@ -470,7 +478,7 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
470 return; 478 return;
471 479
472 printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other " 480 printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other "
473 "IBSS networks with same SSID (merge)\n", sdata->dev->name); 481 "IBSS networks with same SSID (merge)\n", sdata->name);
474 482
475 ieee80211_request_internal_scan(sdata, ifibss->ssid, ifibss->ssid_len); 483 ieee80211_request_internal_scan(sdata, ifibss->ssid, ifibss->ssid_len);
476} 484}
@@ -492,13 +500,13 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
492 * random number generator get different BSSID. */ 500 * random number generator get different BSSID. */
493 get_random_bytes(bssid, ETH_ALEN); 501 get_random_bytes(bssid, ETH_ALEN);
494 for (i = 0; i < ETH_ALEN; i++) 502 for (i = 0; i < ETH_ALEN; i++)
495 bssid[i] ^= sdata->dev->dev_addr[i]; 503 bssid[i] ^= sdata->vif.addr[i];
496 bssid[0] &= ~0x01; 504 bssid[0] &= ~0x01;
497 bssid[0] |= 0x02; 505 bssid[0] |= 0x02;
498 } 506 }
499 507
500 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n", 508 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n",
501 sdata->dev->name, bssid); 509 sdata->name, bssid);
502 510
503 sband = local->hw.wiphy->bands[ifibss->channel->band]; 511 sband = local->hw.wiphy->bands[ifibss->channel->band];
504 512
@@ -514,11 +522,15 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
514 capability, 0); 522 capability, 0);
515} 523}
516 524
525/*
526 * This function is called with state == IEEE80211_IBSS_MLME_SEARCH
527 */
528
517static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata) 529static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
518{ 530{
519 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 531 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
520 struct ieee80211_local *local = sdata->local; 532 struct ieee80211_local *local = sdata->local;
521 struct ieee80211_bss *bss; 533 struct cfg80211_bss *cbss;
522 struct ieee80211_channel *chan = NULL; 534 struct ieee80211_channel *chan = NULL;
523 const u8 *bssid = NULL; 535 const u8 *bssid = NULL;
524 int active_ibss; 536 int active_ibss;
@@ -527,7 +539,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
527 active_ibss = ieee80211_sta_active_ibss(sdata); 539 active_ibss = ieee80211_sta_active_ibss(sdata);
528#ifdef CONFIG_MAC80211_IBSS_DEBUG 540#ifdef CONFIG_MAC80211_IBSS_DEBUG
529 printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n", 541 printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n",
530 sdata->dev->name, active_ibss); 542 sdata->name, active_ibss);
531#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 543#endif /* CONFIG_MAC80211_IBSS_DEBUG */
532 544
533 if (active_ibss) 545 if (active_ibss)
@@ -542,21 +554,23 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
542 chan = ifibss->channel; 554 chan = ifibss->channel;
543 if (!is_zero_ether_addr(ifibss->bssid)) 555 if (!is_zero_ether_addr(ifibss->bssid))
544 bssid = ifibss->bssid; 556 bssid = ifibss->bssid;
545 bss = (void *)cfg80211_get_bss(local->hw.wiphy, chan, bssid, 557 cbss = cfg80211_get_bss(local->hw.wiphy, chan, bssid,
546 ifibss->ssid, ifibss->ssid_len, 558 ifibss->ssid, ifibss->ssid_len,
547 WLAN_CAPABILITY_IBSS | 559 WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_PRIVACY,
548 WLAN_CAPABILITY_PRIVACY, 560 capability);
549 capability); 561
562 if (cbss) {
563 struct ieee80211_bss *bss;
550 564
551 if (bss) { 565 bss = (void *)cbss->priv;
552#ifdef CONFIG_MAC80211_IBSS_DEBUG 566#ifdef CONFIG_MAC80211_IBSS_DEBUG
553 printk(KERN_DEBUG " sta_find_ibss: selected %pM current " 567 printk(KERN_DEBUG " sta_find_ibss: selected %pM current "
554 "%pM\n", bss->cbss.bssid, ifibss->bssid); 568 "%pM\n", cbss->bssid, ifibss->bssid);
555#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 569#endif /* CONFIG_MAC80211_IBSS_DEBUG */
556 570
557 printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM" 571 printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM"
558 " based on configured SSID\n", 572 " based on configured SSID\n",
559 sdata->dev->name, bss->cbss.bssid); 573 sdata->name, cbss->bssid);
560 574
561 ieee80211_sta_join_ibss(sdata, bss); 575 ieee80211_sta_join_ibss(sdata, bss);
562 ieee80211_rx_bss_put(local, bss); 576 ieee80211_rx_bss_put(local, bss);
@@ -568,18 +582,14 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
568#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 582#endif /* CONFIG_MAC80211_IBSS_DEBUG */
569 583
570 /* Selected IBSS not found in current scan results - try to scan */ 584 /* Selected IBSS not found in current scan results - try to scan */
571 if (ifibss->state == IEEE80211_IBSS_MLME_JOINED && 585 if (time_after(jiffies, ifibss->last_scan_completed +
572 !ieee80211_sta_active_ibss(sdata)) {
573 mod_timer(&ifibss->timer,
574 round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
575 } else if (time_after(jiffies, ifibss->last_scan_completed +
576 IEEE80211_SCAN_INTERVAL)) { 586 IEEE80211_SCAN_INTERVAL)) {
577 printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to " 587 printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to "
578 "join\n", sdata->dev->name); 588 "join\n", sdata->name);
579 589
580 ieee80211_request_internal_scan(sdata, ifibss->ssid, 590 ieee80211_request_internal_scan(sdata, ifibss->ssid,
581 ifibss->ssid_len); 591 ifibss->ssid_len);
582 } else if (ifibss->state != IEEE80211_IBSS_MLME_JOINED) { 592 } else {
583 int interval = IEEE80211_SCAN_INTERVAL; 593 int interval = IEEE80211_SCAN_INTERVAL;
584 594
585 if (time_after(jiffies, ifibss->ibss_join_req + 595 if (time_after(jiffies, ifibss->ibss_join_req +
@@ -589,7 +599,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
589 return; 599 return;
590 } 600 }
591 printk(KERN_DEBUG "%s: IBSS not allowed on" 601 printk(KERN_DEBUG "%s: IBSS not allowed on"
592 " %d MHz\n", sdata->dev->name, 602 " %d MHz\n", sdata->name,
593 local->hw.conf.channel->center_freq); 603 local->hw.conf.channel->center_freq);
594 604
595 /* No IBSS found - decrease scan interval and continue 605 /* No IBSS found - decrease scan interval and continue
@@ -597,7 +607,6 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
597 interval = IEEE80211_SCAN_INTERVAL_SLOW; 607 interval = IEEE80211_SCAN_INTERVAL_SLOW;
598 } 608 }
599 609
600 ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
601 mod_timer(&ifibss->timer, 610 mod_timer(&ifibss->timer,
602 round_jiffies(jiffies + interval)); 611 round_jiffies(jiffies + interval));
603 } 612 }
@@ -623,7 +632,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
623#ifdef CONFIG_MAC80211_IBSS_DEBUG 632#ifdef CONFIG_MAC80211_IBSS_DEBUG
624 printk(KERN_DEBUG "%s: RX ProbeReq SA=%pM DA=%pM BSSID=%pM" 633 printk(KERN_DEBUG "%s: RX ProbeReq SA=%pM DA=%pM BSSID=%pM"
625 " (tx_last_beacon=%d)\n", 634 " (tx_last_beacon=%d)\n",
626 sdata->dev->name, mgmt->sa, mgmt->da, 635 sdata->name, mgmt->sa, mgmt->da,
627 mgmt->bssid, tx_last_beacon); 636 mgmt->bssid, tx_last_beacon);
628#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 637#endif /* CONFIG_MAC80211_IBSS_DEBUG */
629 638
@@ -641,7 +650,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
641#ifdef CONFIG_MAC80211_IBSS_DEBUG 650#ifdef CONFIG_MAC80211_IBSS_DEBUG
642 printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq " 651 printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq "
643 "from %pM\n", 652 "from %pM\n",
644 sdata->dev->name, mgmt->sa); 653 sdata->name, mgmt->sa);
645#endif 654#endif
646 return; 655 return;
647 } 656 }
@@ -661,7 +670,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
661 memcpy(resp->da, mgmt->sa, ETH_ALEN); 670 memcpy(resp->da, mgmt->sa, ETH_ALEN);
662#ifdef CONFIG_MAC80211_IBSS_DEBUG 671#ifdef CONFIG_MAC80211_IBSS_DEBUG
663 printk(KERN_DEBUG "%s: Sending ProbeResp to %pM\n", 672 printk(KERN_DEBUG "%s: Sending ProbeResp to %pM\n",
664 sdata->dev->name, resp->da); 673 sdata->name, resp->da);
665#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 674#endif /* CONFIG_MAC80211_IBSS_DEBUG */
666 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 675 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
667 ieee80211_tx_skb(sdata, skb); 676 ieee80211_tx_skb(sdata, skb);
@@ -675,7 +684,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
675 size_t baselen; 684 size_t baselen;
676 struct ieee802_11_elems elems; 685 struct ieee802_11_elems elems;
677 686
678 if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) 687 if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
679 return; /* ignore ProbeResp to foreign address */ 688 return; /* ignore ProbeResp to foreign address */
680 689
681 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; 690 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -748,7 +757,7 @@ static void ieee80211_ibss_work(struct work_struct *work)
748 if (WARN_ON(local->suspended)) 757 if (WARN_ON(local->suspended))
749 return; 758 return;
750 759
751 if (!netif_running(sdata->dev)) 760 if (!ieee80211_sdata_running(sdata))
752 return; 761 return;
753 762
754 if (local->scanning) 763 if (local->scanning)
@@ -831,7 +840,7 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
831 840
832 mutex_lock(&local->iflist_mtx); 841 mutex_lock(&local->iflist_mtx);
833 list_for_each_entry(sdata, &local->interfaces, list) { 842 list_for_each_entry(sdata, &local->interfaces, list) {
834 if (!netif_running(sdata->dev)) 843 if (!ieee80211_sdata_running(sdata))
835 continue; 844 continue;
836 if (sdata->vif.type != NL80211_IFTYPE_ADHOC) 845 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
837 continue; 846 continue;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 91dc8636d64..9dd98b674cb 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -58,6 +58,15 @@ struct ieee80211_local;
58 58
59#define TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024)) 59#define TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024))
60 60
61#define IEEE80211_DEFAULT_UAPSD_QUEUES \
62 (IEEE80211_WMM_IE_STA_QOSINFO_AC_BK | \
63 IEEE80211_WMM_IE_STA_QOSINFO_AC_BE | \
64 IEEE80211_WMM_IE_STA_QOSINFO_AC_VI | \
65 IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
66
67#define IEEE80211_DEFAULT_MAX_SP_LEN \
68 IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
69
61struct ieee80211_fragment_entry { 70struct ieee80211_fragment_entry {
62 unsigned long first_frag_time; 71 unsigned long first_frag_time;
63 unsigned int seq; 72 unsigned int seq;
@@ -71,9 +80,6 @@ struct ieee80211_fragment_entry {
71 80
72 81
73struct ieee80211_bss { 82struct ieee80211_bss {
74 /* Yes, this is a hack */
75 struct cfg80211_bss cbss;
76
77 /* don't want to look up all the time */ 83 /* don't want to look up all the time */
78 size_t ssid_len; 84 size_t ssid_len;
79 u8 ssid[IEEE80211_MAX_SSID_LEN]; 85 u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -81,6 +87,7 @@ struct ieee80211_bss {
81 u8 dtim_period; 87 u8 dtim_period;
82 88
83 bool wmm_used; 89 bool wmm_used;
90 bool uapsd_supported;
84 91
85 unsigned long last_probe_resp; 92 unsigned long last_probe_resp;
86 93
@@ -140,7 +147,6 @@ typedef unsigned __bitwise__ ieee80211_tx_result;
140 147
141struct ieee80211_tx_data { 148struct ieee80211_tx_data {
142 struct sk_buff *skb; 149 struct sk_buff *skb;
143 struct net_device *dev;
144 struct ieee80211_local *local; 150 struct ieee80211_local *local;
145 struct ieee80211_sub_if_data *sdata; 151 struct ieee80211_sub_if_data *sdata;
146 struct sta_info *sta; 152 struct sta_info *sta;
@@ -228,31 +234,77 @@ struct mesh_preq_queue {
228 u8 flags; 234 u8 flags;
229}; 235};
230 236
231enum ieee80211_mgd_state { 237enum ieee80211_work_type {
232 IEEE80211_MGD_STATE_IDLE, 238 IEEE80211_WORK_ABORT,
233 IEEE80211_MGD_STATE_PROBE, 239 IEEE80211_WORK_DIRECT_PROBE,
234 IEEE80211_MGD_STATE_AUTH, 240 IEEE80211_WORK_AUTH,
235 IEEE80211_MGD_STATE_ASSOC, 241 IEEE80211_WORK_ASSOC,
242 IEEE80211_WORK_REMAIN_ON_CHANNEL,
236}; 243};
237 244
238struct ieee80211_mgd_work { 245/**
246 * enum work_done_result - indicates what to do after work was done
247 *
248 * @WORK_DONE_DESTROY: This work item is no longer needed, destroy.
249 * @WORK_DONE_REQUEUE: This work item was reset to be reused, and
250 * should be requeued.
251 */
252enum work_done_result {
253 WORK_DONE_DESTROY,
254 WORK_DONE_REQUEUE,
255};
256
257struct ieee80211_work {
239 struct list_head list; 258 struct list_head list;
240 struct ieee80211_bss *bss; 259
241 int ie_len; 260 struct rcu_head rcu_head;
242 u8 prev_bssid[ETH_ALEN]; 261
243 u8 ssid[IEEE80211_MAX_SSID_LEN]; 262 struct ieee80211_sub_if_data *sdata;
244 u8 ssid_len; 263
264 enum work_done_result (*done)(struct ieee80211_work *wk,
265 struct sk_buff *skb);
266
267 struct ieee80211_channel *chan;
268 enum nl80211_channel_type chan_type;
269
245 unsigned long timeout; 270 unsigned long timeout;
246 enum ieee80211_mgd_state state; 271 enum ieee80211_work_type type;
247 u16 auth_alg, auth_transaction; 272
273 u8 filter_ta[ETH_ALEN];
248 274
249 int tries; 275 bool started;
250 276
251 u8 key[WLAN_KEY_LEN_WEP104]; 277 union {
252 u8 key_len, key_idx; 278 struct {
279 int tries;
280 u16 algorithm, transaction;
281 u8 ssid[IEEE80211_MAX_SSID_LEN];
282 u8 ssid_len;
283 u8 key[WLAN_KEY_LEN_WEP104];
284 u8 key_len, key_idx;
285 bool privacy;
286 } probe_auth;
287 struct {
288 struct cfg80211_bss *bss;
289 const u8 *supp_rates;
290 const u8 *ht_information_ie;
291 enum ieee80211_smps_mode smps;
292 int tries;
293 u16 capability;
294 u8 prev_bssid[ETH_ALEN];
295 u8 ssid[IEEE80211_MAX_SSID_LEN];
296 u8 ssid_len;
297 u8 supp_rates_len;
298 bool wmm_used, use_11n, uapsd_used;
299 } assoc;
300 struct {
301 u32 duration;
302 } remain;
303 };
253 304
305 int ie_len;
254 /* must be last */ 306 /* must be last */
255 u8 ie[0]; /* for auth or assoc frame, not probe */ 307 u8 ie[0];
256}; 308};
257 309
258/* flags used in struct ieee80211_if_managed.flags */ 310/* flags used in struct ieee80211_if_managed.flags */
@@ -260,15 +312,11 @@ enum ieee80211_sta_flags {
260 IEEE80211_STA_BEACON_POLL = BIT(0), 312 IEEE80211_STA_BEACON_POLL = BIT(0),
261 IEEE80211_STA_CONNECTION_POLL = BIT(1), 313 IEEE80211_STA_CONNECTION_POLL = BIT(1),
262 IEEE80211_STA_CONTROL_PORT = BIT(2), 314 IEEE80211_STA_CONTROL_PORT = BIT(2),
263 IEEE80211_STA_WMM_ENABLED = BIT(3),
264 IEEE80211_STA_DISABLE_11N = BIT(4), 315 IEEE80211_STA_DISABLE_11N = BIT(4),
265 IEEE80211_STA_CSA_RECEIVED = BIT(5), 316 IEEE80211_STA_CSA_RECEIVED = BIT(5),
266 IEEE80211_STA_MFP_ENABLED = BIT(6), 317 IEEE80211_STA_MFP_ENABLED = BIT(6),
267}; 318 IEEE80211_STA_UAPSD_ENABLED = BIT(7),
268 319 IEEE80211_STA_NULLFUNC_ACKED = BIT(8),
269/* flags for MLME request */
270enum ieee80211_sta_request {
271 IEEE80211_STA_REQ_SCAN,
272}; 320};
273 321
274struct ieee80211_if_managed { 322struct ieee80211_if_managed {
@@ -285,21 +333,18 @@ struct ieee80211_if_managed {
285 int probe_send_count; 333 int probe_send_count;
286 334
287 struct mutex mtx; 335 struct mutex mtx;
288 struct ieee80211_bss *associated; 336 struct cfg80211_bss *associated;
289 struct ieee80211_mgd_work *old_associate_work;
290 struct list_head work_list;
291 337
292 u8 bssid[ETH_ALEN]; 338 u8 bssid[ETH_ALEN];
293 339
294 u16 aid; 340 u16 aid;
295 u16 capab;
296 341
297 struct sk_buff_head skb_queue; 342 struct sk_buff_head skb_queue;
298 343
299 unsigned long timers_running; /* used for quiesce/restart */ 344 unsigned long timers_running; /* used for quiesce/restart */
300 bool powersave; /* powersave requested for this iface */ 345 bool powersave; /* powersave requested for this iface */
301 346 enum ieee80211_smps_mode req_smps, /* requested smps mode */
302 unsigned long request; 347 ap_smps; /* smps mode AP thinks we're in */
303 348
304 unsigned int flags; 349 unsigned int flags;
305 350
@@ -433,6 +478,8 @@ struct ieee80211_sub_if_data {
433 478
434 int drop_unencrypted; 479 int drop_unencrypted;
435 480
481 char name[IFNAMSIZ];
482
436 /* 483 /*
437 * keep track of whether the HT opmode (stored in 484 * keep track of whether the HT opmode (stored in
438 * vif.bss_info.ht_operation_mode) is valid. 485 * vif.bss_info.ht_operation_mode) is valid.
@@ -458,8 +505,8 @@ struct ieee80211_sub_if_data {
458 */ 505 */
459 struct ieee80211_if_ap *bss; 506 struct ieee80211_if_ap *bss;
460 507
461 int force_unicast_rateidx; /* forced TX rateidx for unicast frames */ 508 /* bitmap of allowed (non-MCS) rate indexes for rate control */
462 int max_ratectrl_rateidx; /* max TX rateidx for rate control */ 509 u32 rc_rateidx_mask[IEEE80211_NUM_BANDS];
463 510
464 union { 511 union {
465 struct ieee80211_if_ap ap; 512 struct ieee80211_if_ap ap;
@@ -565,6 +612,15 @@ struct ieee80211_local {
565 const struct ieee80211_ops *ops; 612 const struct ieee80211_ops *ops;
566 613
567 /* 614 /*
615 * work stuff, potentially off-channel (in the future)
616 */
617 struct mutex work_mtx;
618 struct list_head work_list;
619 struct timer_list work_timer;
620 struct work_struct work_work;
621 struct sk_buff_head work_skb_queue;
622
623 /*
568 * private workqueue to mac80211. mac80211 makes this accessible 624 * private workqueue to mac80211. mac80211 makes this accessible
569 * via ieee80211_queue_work() 625 * via ieee80211_queue_work()
570 */ 626 */
@@ -586,6 +642,9 @@ struct ieee80211_local {
586 /* used for uploading changed mc list */ 642 /* used for uploading changed mc list */
587 struct work_struct reconfig_filter; 643 struct work_struct reconfig_filter;
588 644
645 /* used to reconfigure hardware SM PS */
646 struct work_struct recalc_smps;
647
589 /* aggregated multicast list */ 648 /* aggregated multicast list */
590 struct dev_addr_list *mc_list; 649 struct dev_addr_list *mc_list;
591 int mc_count; 650 int mc_count;
@@ -630,15 +689,18 @@ struct ieee80211_local {
630 689
631 /* Station data */ 690 /* Station data */
632 /* 691 /*
633 * The lock only protects the list, hash, timer and counter 692 * The mutex only protects the list and counter,
634 * against manipulation, reads are done in RCU. Additionally, 693 * reads are done in RCU.
635 * the lock protects each BSS's TIM bitmap. 694 * Additionally, the lock protects the hash table,
695 * the pending list and each BSS's TIM bitmap.
636 */ 696 */
697 struct mutex sta_mtx;
637 spinlock_t sta_lock; 698 spinlock_t sta_lock;
638 unsigned long num_sta; 699 unsigned long num_sta;
639 struct list_head sta_list; 700 struct list_head sta_list, sta_pending_list;
640 struct sta_info *sta_hash[STA_HASH_SIZE]; 701 struct sta_info *sta_hash[STA_HASH_SIZE];
641 struct timer_list sta_cleanup; 702 struct timer_list sta_cleanup;
703 struct work_struct sta_finish_work;
642 int sta_generation; 704 int sta_generation;
643 705
644 struct sk_buff_head pending[IEEE80211_MAX_QUEUES]; 706 struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
@@ -689,6 +751,10 @@ struct ieee80211_local {
689 enum nl80211_channel_type oper_channel_type; 751 enum nl80211_channel_type oper_channel_type;
690 struct ieee80211_channel *oper_channel, *csa_channel; 752 struct ieee80211_channel *oper_channel, *csa_channel;
691 753
754 /* Temporary remain-on-channel for off-channel operations */
755 struct ieee80211_channel *tmp_channel;
756 enum nl80211_channel_type tmp_channel_type;
757
692 /* SNMP counters */ 758 /* SNMP counters */
693 /* dot11CountersTable */ 759 /* dot11CountersTable */
694 u32 dot11TransmittedFragmentCount; 760 u32 dot11TransmittedFragmentCount;
@@ -708,10 +774,6 @@ struct ieee80211_local {
708 assoc_led_name[32], radio_led_name[32]; 774 assoc_led_name[32], radio_led_name[32];
709#endif 775#endif
710 776
711#ifdef CONFIG_MAC80211_DEBUGFS
712 struct work_struct sta_debugfs_add;
713#endif
714
715#ifdef CONFIG_MAC80211_DEBUG_COUNTERS 777#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
716 /* TX/RX handler statistics */ 778 /* TX/RX handler statistics */
717 unsigned int tx_handlers_drop; 779 unsigned int tx_handlers_drop;
@@ -745,8 +807,22 @@ struct ieee80211_local {
745 int wifi_wme_noack_test; 807 int wifi_wme_noack_test;
746 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ 808 unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
747 809
810 /*
811 * Bitmask of enabled u-apsd queues,
812 * IEEE80211_WMM_IE_STA_QOSINFO_AC_BE & co. Needs a new association
813 * to take effect.
814 */
815 unsigned int uapsd_queues;
816
817 /*
818 * Maximum number of buffered frames AP can deliver during a
819 * service period, IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL or similar.
820 * Needs a new association to take effect.
821 */
822 unsigned int uapsd_max_sp_len;
823
748 bool pspolling; 824 bool pspolling;
749 bool scan_ps_enabled; 825 bool offchannel_ps_enabled;
750 /* 826 /*
751 * PS can only be enabled when we have exactly one managed 827 * PS can only be enabled when we have exactly one managed
752 * interface (and monitors) in PS, this then points there. 828 * interface (and monitors) in PS, this then points there.
@@ -760,6 +836,8 @@ struct ieee80211_local {
760 int user_power_level; /* in dBm */ 836 int user_power_level; /* in dBm */
761 int power_constr_level; /* in dBm */ 837 int power_constr_level; /* in dBm */
762 838
839 enum ieee80211_smps_mode smps_mode;
840
763 struct work_struct restart_work; 841 struct work_struct restart_work;
764 842
765#ifdef CONFIG_MAC80211_DEBUGFS 843#ifdef CONFIG_MAC80211_DEBUGFS
@@ -874,6 +952,8 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
874void ieee80211_configure_filter(struct ieee80211_local *local); 952void ieee80211_configure_filter(struct ieee80211_local *local);
875u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata); 953u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
876 954
955extern bool ieee80211_disable_40mhz_24ghz;
956
877/* STA code */ 957/* STA code */
878void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata); 958void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata);
879int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, 959int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
@@ -905,7 +985,8 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata);
905ieee80211_rx_result 985ieee80211_rx_result
906ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); 986ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
907struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, 987struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
908 u8 *bssid, u8 *addr, u32 supp_rates); 988 u8 *bssid, u8 *addr, u32 supp_rates,
989 gfp_t gfp);
909int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, 990int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
910 struct cfg80211_ibss_params *params); 991 struct cfg80211_ibss_params *params);
911int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata); 992int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata);
@@ -937,7 +1018,15 @@ ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
937void ieee80211_rx_bss_put(struct ieee80211_local *local, 1018void ieee80211_rx_bss_put(struct ieee80211_local *local,
938 struct ieee80211_bss *bss); 1019 struct ieee80211_bss *bss);
939 1020
1021/* off-channel helpers */
1022void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local);
1023void ieee80211_offchannel_stop_station(struct ieee80211_local *local);
1024void ieee80211_offchannel_return(struct ieee80211_local *local,
1025 bool enable_beaconing);
1026
940/* interface handling */ 1027/* interface handling */
1028int ieee80211_iface_init(void);
1029void ieee80211_iface_exit(void);
941int ieee80211_if_add(struct ieee80211_local *local, const char *name, 1030int ieee80211_if_add(struct ieee80211_local *local, const char *name,
942 struct net_device **new_dev, enum nl80211_iftype type, 1031 struct net_device **new_dev, enum nl80211_iftype type,
943 struct vif_params *params); 1032 struct vif_params *params);
@@ -948,6 +1037,11 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local);
948u32 __ieee80211_recalc_idle(struct ieee80211_local *local); 1037u32 __ieee80211_recalc_idle(struct ieee80211_local *local);
949void ieee80211_recalc_idle(struct ieee80211_local *local); 1038void ieee80211_recalc_idle(struct ieee80211_local *local);
950 1039
1040static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
1041{
1042 return netif_running(sdata->dev);
1043}
1044
951/* tx handling */ 1045/* tx handling */
952void ieee80211_clear_tx_pending(struct ieee80211_local *local); 1046void ieee80211_clear_tx_pending(struct ieee80211_local *local);
953void ieee80211_tx_pending(unsigned long data); 1047void ieee80211_tx_pending(unsigned long data);
@@ -976,6 +1070,9 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
976void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, 1070void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
977 const u8 *da, u16 tid, 1071 const u8 *da, u16 tid,
978 u16 initiator, u16 reason_code); 1072 u16 initiator, u16 reason_code);
1073int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
1074 enum ieee80211_smps_mode smps, const u8 *da,
1075 const u8 *bssid);
979 1076
980void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da, 1077void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da,
981 u16 tid, u16 initiator, u16 reason); 1078 u16 tid, u16 initiator, u16 reason);
@@ -1086,6 +1183,28 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
1086u32 ieee80211_sta_get_rates(struct ieee80211_local *local, 1183u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
1087 struct ieee802_11_elems *elems, 1184 struct ieee802_11_elems *elems,
1088 enum ieee80211_band band); 1185 enum ieee80211_band band);
1186int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
1187 enum ieee80211_smps_mode smps_mode);
1188void ieee80211_recalc_smps(struct ieee80211_local *local,
1189 struct ieee80211_sub_if_data *forsdata);
1190
1191size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
1192 const u8 *ids, int n_ids, size_t offset);
1193size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset);
1194
1195/* internal work items */
1196void ieee80211_work_init(struct ieee80211_local *local);
1197void ieee80211_add_work(struct ieee80211_work *wk);
1198void free_work(struct ieee80211_work *wk);
1199void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata);
1200ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
1201 struct sk_buff *skb);
1202int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
1203 struct ieee80211_channel *chan,
1204 enum nl80211_channel_type channel_type,
1205 unsigned int duration, u64 *cookie);
1206int ieee80211_wk_cancel_remain_on_channel(
1207 struct ieee80211_sub_if_data *sdata, u64 cookie);
1089 1208
1090#ifdef CONFIG_MAC80211_NOINLINE 1209#ifdef CONFIG_MAC80211_NOINLINE
1091#define debug_noinline noinline 1210#define debug_noinline noinline
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 32abae3ce32..09fff4662e8 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -62,6 +62,23 @@ static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
62 return 0; 62 return 0;
63} 63}
64 64
65static int ieee80211_change_mac(struct net_device *dev, void *addr)
66{
67 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
68 struct sockaddr *sa = addr;
69 int ret;
70
71 if (ieee80211_sdata_running(sdata))
72 return -EBUSY;
73
74 ret = eth_mac_addr(dev, sa);
75
76 if (ret == 0)
77 memcpy(sdata->vif.addr, sa->sa_data, ETH_ALEN);
78
79 return ret;
80}
81
65static inline int identical_mac_addr_allowed(int type1, int type2) 82static inline int identical_mac_addr_allowed(int type1, int type2)
66{ 83{
67 return type1 == NL80211_IFTYPE_MONITOR || 84 return type1 == NL80211_IFTYPE_MONITOR ||
@@ -82,7 +99,6 @@ static int ieee80211_open(struct net_device *dev)
82 struct ieee80211_sub_if_data *nsdata; 99 struct ieee80211_sub_if_data *nsdata;
83 struct ieee80211_local *local = sdata->local; 100 struct ieee80211_local *local = sdata->local;
84 struct sta_info *sta; 101 struct sta_info *sta;
85 struct ieee80211_if_init_conf conf;
86 u32 changed = 0; 102 u32 changed = 0;
87 int res; 103 int res;
88 u32 hw_reconf_flags = 0; 104 u32 hw_reconf_flags = 0;
@@ -97,7 +113,7 @@ static int ieee80211_open(struct net_device *dev)
97 list_for_each_entry(nsdata, &local->interfaces, list) { 113 list_for_each_entry(nsdata, &local->interfaces, list) {
98 struct net_device *ndev = nsdata->dev; 114 struct net_device *ndev = nsdata->dev;
99 115
100 if (ndev != dev && netif_running(ndev)) { 116 if (ndev != dev && ieee80211_sdata_running(nsdata)) {
101 /* 117 /*
102 * Allow only a single IBSS interface to be up at any 118 * Allow only a single IBSS interface to be up at any
103 * time. This is restricted because beacon distribution 119 * time. This is restricted because beacon distribution
@@ -183,7 +199,7 @@ static int ieee80211_open(struct net_device *dev)
183 struct net_device *ndev = nsdata->dev; 199 struct net_device *ndev = nsdata->dev;
184 200
185 /* 201 /*
186 * No need to check netif_running since we do not allow 202 * No need to check running since we do not allow
187 * it to start up with this invalid address. 203 * it to start up with this invalid address.
188 */ 204 */
189 if (compare_ether_addr(null_addr, ndev->dev_addr) == 0) { 205 if (compare_ether_addr(null_addr, ndev->dev_addr) == 0) {
@@ -234,10 +250,7 @@ static int ieee80211_open(struct net_device *dev)
234 ieee80211_configure_filter(local); 250 ieee80211_configure_filter(local);
235 break; 251 break;
236 default: 252 default:
237 conf.vif = &sdata->vif; 253 res = drv_add_interface(local, &sdata->vif);
238 conf.type = sdata->vif.type;
239 conf.mac_addr = dev->dev_addr;
240 res = drv_add_interface(local, &conf);
241 if (res) 254 if (res)
242 goto err_stop; 255 goto err_stop;
243 256
@@ -320,7 +333,7 @@ static int ieee80211_open(struct net_device *dev)
320 333
321 return 0; 334 return 0;
322 err_del_interface: 335 err_del_interface:
323 drv_remove_interface(local, &conf); 336 drv_remove_interface(local, &sdata->vif);
324 err_stop: 337 err_stop:
325 if (!local->open_count) 338 if (!local->open_count)
326 drv_stop(local); 339 drv_stop(local);
@@ -335,7 +348,6 @@ static int ieee80211_stop(struct net_device *dev)
335{ 348{
336 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 349 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
337 struct ieee80211_local *local = sdata->local; 350 struct ieee80211_local *local = sdata->local;
338 struct ieee80211_if_init_conf conf;
339 struct sta_info *sta; 351 struct sta_info *sta;
340 unsigned long flags; 352 unsigned long flags;
341 struct sk_buff *skb, *tmp; 353 struct sk_buff *skb, *tmp;
@@ -348,6 +360,11 @@ static int ieee80211_stop(struct net_device *dev)
348 netif_tx_stop_all_queues(dev); 360 netif_tx_stop_all_queues(dev);
349 361
350 /* 362 /*
363 * Purge work for this interface.
364 */
365 ieee80211_work_purge(sdata);
366
367 /*
351 * Now delete all active aggregation sessions. 368 * Now delete all active aggregation sessions.
352 */ 369 */
353 rcu_read_lock(); 370 rcu_read_lock();
@@ -514,12 +531,9 @@ static int ieee80211_stop(struct net_device *dev)
514 BSS_CHANGED_BEACON_ENABLED); 531 BSS_CHANGED_BEACON_ENABLED);
515 } 532 }
516 533
517 conf.vif = &sdata->vif;
518 conf.type = sdata->vif.type;
519 conf.mac_addr = dev->dev_addr;
520 /* disable all keys for as long as this netdev is down */ 534 /* disable all keys for as long as this netdev is down */
521 ieee80211_disable_keys(sdata); 535 ieee80211_disable_keys(sdata);
522 drv_remove_interface(local, &conf); 536 drv_remove_interface(local, &sdata->vif);
523 } 537 }
524 538
525 sdata->bss = NULL; 539 sdata->bss = NULL;
@@ -659,7 +673,7 @@ static const struct net_device_ops ieee80211_dataif_ops = {
659 .ndo_start_xmit = ieee80211_subif_start_xmit, 673 .ndo_start_xmit = ieee80211_subif_start_xmit,
660 .ndo_set_multicast_list = ieee80211_set_multicast_list, 674 .ndo_set_multicast_list = ieee80211_set_multicast_list,
661 .ndo_change_mtu = ieee80211_change_mtu, 675 .ndo_change_mtu = ieee80211_change_mtu,
662 .ndo_set_mac_address = eth_mac_addr, 676 .ndo_set_mac_address = ieee80211_change_mac,
663 .ndo_select_queue = ieee80211_netdev_select_queue, 677 .ndo_select_queue = ieee80211_netdev_select_queue,
664}; 678};
665 679
@@ -681,10 +695,14 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
681 695
682 hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len)); 696 hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len));
683 697
684 if (!ieee80211_is_data_qos(hdr->frame_control)) { 698 if (!ieee80211_is_data(hdr->frame_control)) {
685 skb->priority = 7; 699 skb->priority = 7;
686 return ieee802_1d_to_ac[skb->priority]; 700 return ieee802_1d_to_ac[skb->priority];
687 } 701 }
702 if (!ieee80211_is_data_qos(hdr->frame_control)) {
703 skb->priority = 0;
704 return ieee802_1d_to_ac[skb->priority];
705 }
688 706
689 p = ieee80211_get_qos_ctl(hdr); 707 p = ieee80211_get_qos_ctl(hdr);
690 skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK; 708 skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
@@ -779,7 +797,7 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
779 * and goes into the requested mode. 797 * and goes into the requested mode.
780 */ 798 */
781 799
782 if (netif_running(sdata->dev)) 800 if (ieee80211_sdata_running(sdata))
783 return -EBUSY; 801 return -EBUSY;
784 802
785 /* Purge and reset type-dependent state. */ 803 /* Purge and reset type-dependent state. */
@@ -833,6 +851,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
833 /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ 851 /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */
834 sdata = netdev_priv(ndev); 852 sdata = netdev_priv(ndev);
835 ndev->ieee80211_ptr = &sdata->wdev; 853 ndev->ieee80211_ptr = &sdata->wdev;
854 memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
855 memcpy(sdata->name, ndev->name, IFNAMSIZ);
836 856
837 /* initialise type-independent data */ 857 /* initialise type-independent data */
838 sdata->wdev.wiphy = local->hw.wiphy; 858 sdata->wdev.wiphy = local->hw.wiphy;
@@ -844,8 +864,12 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
844 864
845 INIT_LIST_HEAD(&sdata->key_list); 865 INIT_LIST_HEAD(&sdata->key_list);
846 866
847 sdata->force_unicast_rateidx = -1; 867 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
848 sdata->max_ratectrl_rateidx = -1; 868 struct ieee80211_supported_band *sband;
869 sband = local->hw.wiphy->bands[i];
870 sdata->rc_rateidx_mask[i] =
871 sband ? (1 << sband->n_bitrates) - 1 : 0;
872 }
849 873
850 /* setup type-dependent data */ 874 /* setup type-dependent data */
851 ieee80211_setup_sdata(sdata, type); 875 ieee80211_setup_sdata(sdata, type);
@@ -938,6 +962,8 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local)
938 wiphy_name(local->hw.wiphy)); 962 wiphy_name(local->hw.wiphy));
939#endif 963#endif
940 964
965 drv_flush(local, false);
966
941 local->hw.conf.flags |= IEEE80211_CONF_IDLE; 967 local->hw.conf.flags |= IEEE80211_CONF_IDLE;
942 return IEEE80211_CONF_CHANGE_IDLE; 968 return IEEE80211_CONF_CHANGE_IDLE;
943} 969}
@@ -947,16 +973,18 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
947 struct ieee80211_sub_if_data *sdata; 973 struct ieee80211_sub_if_data *sdata;
948 int count = 0; 974 int count = 0;
949 975
976 if (!list_empty(&local->work_list))
977 return ieee80211_idle_off(local, "working");
978
950 if (local->scanning) 979 if (local->scanning)
951 return ieee80211_idle_off(local, "scanning"); 980 return ieee80211_idle_off(local, "scanning");
952 981
953 list_for_each_entry(sdata, &local->interfaces, list) { 982 list_for_each_entry(sdata, &local->interfaces, list) {
954 if (!netif_running(sdata->dev)) 983 if (!ieee80211_sdata_running(sdata))
955 continue; 984 continue;
956 /* do not count disabled managed interfaces */ 985 /* do not count disabled managed interfaces */
957 if (sdata->vif.type == NL80211_IFTYPE_STATION && 986 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
958 !sdata->u.mgd.associated && 987 !sdata->u.mgd.associated)
959 list_empty(&sdata->u.mgd.work_list))
960 continue; 988 continue;
961 /* do not count unused IBSS interfaces */ 989 /* do not count unused IBSS interfaces */
962 if (sdata->vif.type == NL80211_IFTYPE_ADHOC && 990 if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
@@ -984,3 +1012,41 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)
984 if (chg) 1012 if (chg)
985 ieee80211_hw_config(local, chg); 1013 ieee80211_hw_config(local, chg);
986} 1014}
1015
1016static int netdev_notify(struct notifier_block *nb,
1017 unsigned long state,
1018 void *ndev)
1019{
1020 struct net_device *dev = ndev;
1021 struct ieee80211_sub_if_data *sdata;
1022
1023 if (state != NETDEV_CHANGENAME)
1024 return 0;
1025
1026 if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
1027 return 0;
1028
1029 if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
1030 return 0;
1031
1032 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1033
1034 memcpy(sdata->name, sdata->name, IFNAMSIZ);
1035
1036 ieee80211_debugfs_rename_netdev(sdata);
1037 return 0;
1038}
1039
1040static struct notifier_block mac80211_netdev_notifier = {
1041 .notifier_call = netdev_notify,
1042};
1043
1044int ieee80211_iface_init(void)
1045{
1046 return register_netdevice_notifier(&mac80211_netdev_notifier);
1047}
1048
1049void ieee80211_iface_exit(void)
1050{
1051 unregister_netdevice_notifier(&mac80211_netdev_notifier);
1052}
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 659a42d529e..8160d9c5372 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -139,7 +139,7 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
139 struct ieee80211_sub_if_data, 139 struct ieee80211_sub_if_data,
140 u.ap); 140 u.ap);
141 141
142 ret = drv_set_key(key->local, SET_KEY, &sdata->vif, sta, &key->conf); 142 ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf);
143 143
144 if (!ret) { 144 if (!ret) {
145 spin_lock_bh(&todo_lock); 145 spin_lock_bh(&todo_lock);
@@ -181,7 +181,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
181 struct ieee80211_sub_if_data, 181 struct ieee80211_sub_if_data,
182 u.ap); 182 u.ap);
183 183
184 ret = drv_set_key(key->local, DISABLE_KEY, &sdata->vif, 184 ret = drv_set_key(key->local, DISABLE_KEY, sdata,
185 sta, &key->conf); 185 sta, &key->conf);
186 186
187 if (ret) 187 if (ret)
@@ -421,7 +421,7 @@ void ieee80211_key_link(struct ieee80211_key *key,
421 */ 421 */
422 422
423 /* same here, the AP could be using QoS */ 423 /* same here, the AP could be using QoS */
424 ap = sta_info_get(key->local, key->sdata->u.mgd.bssid); 424 ap = sta_info_get(key->sdata, key->sdata->u.mgd.bssid);
425 if (ap) { 425 if (ap) {
426 if (test_sta_flags(ap, WLAN_STA_WME)) 426 if (test_sta_flags(ap, WLAN_STA_WME))
427 key->conf.flags |= 427 key->conf.flags |=
@@ -443,7 +443,7 @@ void ieee80211_key_link(struct ieee80211_key *key,
443 add_todo(old_key, KEY_FLAG_TODO_DELETE); 443 add_todo(old_key, KEY_FLAG_TODO_DELETE);
444 444
445 add_todo(key, KEY_FLAG_TODO_ADD_DEBUGFS); 445 add_todo(key, KEY_FLAG_TODO_ADD_DEBUGFS);
446 if (netif_running(sdata->dev)) 446 if (ieee80211_sdata_running(sdata))
447 add_todo(key, KEY_FLAG_TODO_HWACCEL_ADD); 447 add_todo(key, KEY_FLAG_TODO_HWACCEL_ADD);
448 448
449 spin_unlock_irqrestore(&sdata->local->key_lock, flags); 449 spin_unlock_irqrestore(&sdata->local->key_lock, flags);
@@ -509,7 +509,7 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
509{ 509{
510 ASSERT_RTNL(); 510 ASSERT_RTNL();
511 511
512 if (WARN_ON(!netif_running(sdata->dev))) 512 if (WARN_ON(!ieee80211_sdata_running(sdata)))
513 return; 513 return;
514 514
515 ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_ADD); 515 ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_ADD);
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index a49f93b79e9..bdc2968c2bb 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -59,11 +59,17 @@ enum ieee80211_internal_key_flags {
59 KEY_FLAG_TODO_DEFMGMTKEY = BIT(6), 59 KEY_FLAG_TODO_DEFMGMTKEY = BIT(6),
60}; 60};
61 61
62enum ieee80211_internal_tkip_state {
63 TKIP_STATE_NOT_INIT,
64 TKIP_STATE_PHASE1_DONE,
65 TKIP_STATE_PHASE1_HW_UPLOADED,
66};
67
62struct tkip_ctx { 68struct tkip_ctx {
63 u32 iv32; 69 u32 iv32;
64 u16 iv16; 70 u16 iv16;
65 u16 p1k[5]; 71 u16 p1k[5];
66 int initialized; 72 enum ieee80211_internal_tkip_state state;
67}; 73};
68 74
69struct ieee80211_key { 75struct ieee80211_key {
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 0d2d94881f1..ec8f767ba95 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -17,7 +17,6 @@
17#include <linux/skbuff.h> 17#include <linux/skbuff.h>
18#include <linux/etherdevice.h> 18#include <linux/etherdevice.h>
19#include <linux/if_arp.h> 19#include <linux/if_arp.h>
20#include <linux/wireless.h>
21#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
22#include <linux/bitmap.h> 21#include <linux/bitmap.h>
23#include <linux/pm_qos_params.h> 22#include <linux/pm_qos_params.h>
@@ -32,7 +31,12 @@
32#include "led.h" 31#include "led.h"
33#include "cfg.h" 32#include "cfg.h"
34#include "debugfs.h" 33#include "debugfs.h"
35#include "debugfs_netdev.h" 34
35
36bool ieee80211_disable_40mhz_24ghz;
37module_param(ieee80211_disable_40mhz_24ghz, bool, 0644);
38MODULE_PARM_DESC(ieee80211_disable_40mhz_24ghz,
39 "Disable 40MHz support in the 2.4GHz band");
36 40
37void ieee80211_configure_filter(struct ieee80211_local *local) 41void ieee80211_configure_filter(struct ieee80211_local *local)
38{ 42{
@@ -102,6 +106,9 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
102 if (scan_chan) { 106 if (scan_chan) {
103 chan = scan_chan; 107 chan = scan_chan;
104 channel_type = NL80211_CHAN_NO_HT; 108 channel_type = NL80211_CHAN_NO_HT;
109 } else if (local->tmp_channel) {
110 chan = scan_chan = local->tmp_channel;
111 channel_type = local->tmp_channel_type;
105 } else { 112 } else {
106 chan = local->oper_channel; 113 chan = local->oper_channel;
107 channel_type = local->oper_channel_type; 114 channel_type = local->oper_channel_type;
@@ -114,6 +121,18 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
114 changed |= IEEE80211_CONF_CHANGE_CHANNEL; 121 changed |= IEEE80211_CONF_CHANGE_CHANNEL;
115 } 122 }
116 123
124 if (!conf_is_ht(&local->hw.conf)) {
125 /*
126 * mac80211.h documents that this is only valid
127 * when the channel is set to an HT type, and
128 * that otherwise STATIC is used.
129 */
130 local->hw.conf.smps_mode = IEEE80211_SMPS_STATIC;
131 } else if (local->hw.conf.smps_mode != local->smps_mode) {
132 local->hw.conf.smps_mode = local->smps_mode;
133 changed |= IEEE80211_CONF_CHANGE_SMPS;
134 }
135
117 if (scan_chan) 136 if (scan_chan)
118 power = chan->max_power; 137 power = chan->max_power;
119 else 138 else
@@ -173,7 +192,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
173 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 192 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
174 sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid; 193 sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid;
175 else if (sdata->vif.type == NL80211_IFTYPE_AP) 194 else if (sdata->vif.type == NL80211_IFTYPE_AP)
176 sdata->vif.bss_conf.bssid = sdata->dev->dev_addr; 195 sdata->vif.bss_conf.bssid = sdata->vif.addr;
177 else if (ieee80211_vif_is_mesh(&sdata->vif)) { 196 else if (ieee80211_vif_is_mesh(&sdata->vif)) {
178 sdata->vif.bss_conf.bssid = zero; 197 sdata->vif.bss_conf.bssid = zero;
179 } else { 198 } else {
@@ -195,7 +214,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
195 } 214 }
196 215
197 if (changed & BSS_CHANGED_BEACON_ENABLED) { 216 if (changed & BSS_CHANGED_BEACON_ENABLED) {
198 if (local->quiescing || !netif_running(sdata->dev) || 217 if (local->quiescing || !ieee80211_sdata_running(sdata) ||
199 test_bit(SCAN_SW_SCANNING, &local->scanning)) { 218 test_bit(SCAN_SW_SCANNING, &local->scanning)) {
200 sdata->vif.bss_conf.enable_beacon = false; 219 sdata->vif.bss_conf.enable_beacon = false;
201 } else { 220 } else {
@@ -223,8 +242,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
223 } 242 }
224 } 243 }
225 244
226 drv_bss_info_changed(local, &sdata->vif, 245 drv_bss_info_changed(local, sdata, &sdata->vif.bss_conf, changed);
227 &sdata->vif.bss_conf, changed);
228} 246}
229 247
230u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) 248u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
@@ -299,6 +317,16 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
299} 317}
300EXPORT_SYMBOL(ieee80211_restart_hw); 318EXPORT_SYMBOL(ieee80211_restart_hw);
301 319
320static void ieee80211_recalc_smps_work(struct work_struct *work)
321{
322 struct ieee80211_local *local =
323 container_of(work, struct ieee80211_local, recalc_smps);
324
325 mutex_lock(&local->iflist_mtx);
326 ieee80211_recalc_smps(local, NULL);
327 mutex_unlock(&local->iflist_mtx);
328}
329
302struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, 330struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
303 const struct ieee80211_ops *ops) 331 const struct ieee80211_ops *ops)
304{ 332{
@@ -333,9 +361,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
333 WIPHY_FLAG_4ADDR_STATION; 361 WIPHY_FLAG_4ADDR_STATION;
334 wiphy->privid = mac80211_wiphy_privid; 362 wiphy->privid = mac80211_wiphy_privid;
335 363
336 /* Yes, putting cfg80211_bss into ieee80211_bss is a hack */ 364 wiphy->bss_priv_size = sizeof(struct ieee80211_bss);
337 wiphy->bss_priv_size = sizeof(struct ieee80211_bss) -
338 sizeof(struct cfg80211_bss);
339 365
340 local = wiphy_priv(wiphy); 366 local = wiphy_priv(wiphy);
341 367
@@ -358,6 +384,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
358 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; 384 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
359 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; 385 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
360 local->user_power_level = -1; 386 local->user_power_level = -1;
387 local->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES;
388 local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;
361 389
362 INIT_LIST_HEAD(&local->interfaces); 390 INIT_LIST_HEAD(&local->interfaces);
363 mutex_init(&local->iflist_mtx); 391 mutex_init(&local->iflist_mtx);
@@ -369,9 +397,13 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
369 397
370 INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work); 398 INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
371 399
400 ieee80211_work_init(local);
401
372 INIT_WORK(&local->restart_work, ieee80211_restart_work); 402 INIT_WORK(&local->restart_work, ieee80211_restart_work);
373 403
374 INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter); 404 INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
405 INIT_WORK(&local->recalc_smps, ieee80211_recalc_smps_work);
406 local->smps_mode = IEEE80211_SMPS_OFF;
375 407
376 INIT_WORK(&local->dynamic_ps_enable_work, 408 INIT_WORK(&local->dynamic_ps_enable_work,
377 ieee80211_dynamic_ps_enable_work); 409 ieee80211_dynamic_ps_enable_work);
@@ -461,6 +493,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
461 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) 493 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
462 local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; 494 local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
463 495
496 WARN((local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
497 && (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK),
498 "U-APSD not supported with HW_PS_NULLFUNC_STACK\n");
499
464 /* 500 /*
465 * Calculate scan IE length -- we need this to alloc 501 * Calculate scan IE length -- we need this to alloc
466 * memory and to subtract from the driver limit. It 502 * memory and to subtract from the driver limit. It
@@ -674,11 +710,19 @@ static int __init ieee80211_init(void)
674 710
675 ret = rc80211_pid_init(); 711 ret = rc80211_pid_init();
676 if (ret) 712 if (ret)
677 return ret; 713 goto err_pid;
678 714
679 ieee80211_debugfs_netdev_init(); 715 ret = ieee80211_iface_init();
716 if (ret)
717 goto err_netdev;
680 718
681 return 0; 719 return 0;
720 err_netdev:
721 rc80211_pid_exit();
722 err_pid:
723 rc80211_minstrel_exit();
724
725 return ret;
682} 726}
683 727
684static void __exit ieee80211_exit(void) 728static void __exit ieee80211_exit(void)
@@ -695,7 +739,7 @@ static void __exit ieee80211_exit(void)
695 if (mesh_allocated) 739 if (mesh_allocated)
696 ieee80211s_stop(); 740 ieee80211s_stop();
697 741
698 ieee80211_debugfs_netdev_exit(); 742 ieee80211_iface_exit();
699} 743}
700 744
701 745
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 6a433142959..61080c5fad5 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -457,7 +457,7 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
457 457
458#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 458#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
459 printk(KERN_DEBUG "%s: running mesh housekeeping\n", 459 printk(KERN_DEBUG "%s: running mesh housekeeping\n",
460 sdata->dev->name); 460 sdata->name);
461#endif 461#endif
462 462
463 ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT); 463 ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
@@ -565,7 +565,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
565 565
566 /* ignore ProbeResp to foreign address */ 566 /* ignore ProbeResp to foreign address */
567 if (stype == IEEE80211_STYPE_PROBE_RESP && 567 if (stype == IEEE80211_STYPE_PROBE_RESP &&
568 compare_ether_addr(mgmt->da, sdata->dev->dev_addr)) 568 compare_ether_addr(mgmt->da, sdata->vif.addr))
569 return; 569 return;
570 570
571 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; 571 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -645,7 +645,7 @@ static void ieee80211_mesh_work(struct work_struct *work)
645 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 645 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
646 struct sk_buff *skb; 646 struct sk_buff *skb;
647 647
648 if (!netif_running(sdata->dev)) 648 if (!ieee80211_sdata_running(sdata))
649 return; 649 return;
650 650
651 if (local->scanning) 651 if (local->scanning)
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index d28acb6b1f8..ce84237ebad 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -128,9 +128,9 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
128 IEEE80211_STYPE_ACTION); 128 IEEE80211_STYPE_ACTION);
129 129
130 memcpy(mgmt->da, da, ETH_ALEN); 130 memcpy(mgmt->da, da, ETH_ALEN);
131 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 131 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
132 /* BSSID == SA */ 132 /* BSSID == SA */
133 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); 133 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
134 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; 134 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
135 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; 135 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
136 136
@@ -222,7 +222,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
222 IEEE80211_STYPE_ACTION); 222 IEEE80211_STYPE_ACTION);
223 223
224 memcpy(mgmt->da, ra, ETH_ALEN); 224 memcpy(mgmt->da, ra, ETH_ALEN);
225 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 225 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
226 /* BSSID is left zeroed, wildcard value */ 226 /* BSSID is left zeroed, wildcard value */
227 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; 227 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
228 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION; 228 mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
@@ -335,7 +335,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
335 bool process = true; 335 bool process = true;
336 336
337 rcu_read_lock(); 337 rcu_read_lock();
338 sta = sta_info_get(local, mgmt->sa); 338 sta = sta_info_get(sdata, mgmt->sa);
339 if (!sta) { 339 if (!sta) {
340 rcu_read_unlock(); 340 rcu_read_unlock();
341 return 0; 341 return 0;
@@ -374,7 +374,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
374 new_metric = MAX_METRIC; 374 new_metric = MAX_METRIC;
375 exp_time = TU_TO_EXP_TIME(orig_lifetime); 375 exp_time = TU_TO_EXP_TIME(orig_lifetime);
376 376
377 if (memcmp(orig_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) { 377 if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0) {
378 /* This MP is the originator, we are not interested in this 378 /* This MP is the originator, we are not interested in this
379 * frame, except for updating transmitter's path info. 379 * frame, except for updating transmitter's path info.
380 */ 380 */
@@ -486,7 +486,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
486 486
487 mhwmp_dbg("received PREQ from %pM\n", orig_addr); 487 mhwmp_dbg("received PREQ from %pM\n", orig_addr);
488 488
489 if (memcmp(target_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) { 489 if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0) {
490 mhwmp_dbg("PREQ is for us\n"); 490 mhwmp_dbg("PREQ is for us\n");
491 forward = false; 491 forward = false;
492 reply = true; 492 reply = true;
@@ -579,7 +579,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
579 * replies 579 * replies
580 */ 580 */
581 target_addr = PREP_IE_TARGET_ADDR(prep_elem); 581 target_addr = PREP_IE_TARGET_ADDR(prep_elem);
582 if (memcmp(target_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) 582 if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0)
583 /* destination, no forwarding required */ 583 /* destination, no forwarding required */
584 return; 584 return;
585 585
@@ -890,7 +890,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
890 target_flags = MP_F_RF; 890 target_flags = MP_F_RF;
891 891
892 spin_unlock_bh(&mpath->state_lock); 892 spin_unlock_bh(&mpath->state_lock);
893 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->dev->dev_addr, 893 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr,
894 cpu_to_le32(ifmsh->sn), target_flags, mpath->dst, 894 cpu_to_le32(ifmsh->sn), target_flags, mpath->dst,
895 cpu_to_le32(mpath->sn), broadcast_addr, 0, 895 cpu_to_le32(mpath->sn), broadcast_addr, 0,
896 ttl, cpu_to_le32(lifetime), 0, 896 ttl, cpu_to_le32(lifetime), 0,
@@ -939,7 +939,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
939 if (time_after(jiffies, 939 if (time_after(jiffies,
940 mpath->exp_time - 940 mpath->exp_time -
941 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) && 941 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
942 !memcmp(sdata->dev->dev_addr, hdr->addr4, ETH_ALEN) && 942 !memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) &&
943 !(mpath->flags & MESH_PATH_RESOLVING) && 943 !(mpath->flags & MESH_PATH_RESOLVING) &&
944 !(mpath->flags & MESH_PATH_FIXED)) { 944 !(mpath->flags & MESH_PATH_FIXED)) {
945 mesh_queue_preq(mpath, 945 mesh_queue_preq(mpath,
@@ -1010,7 +1010,7 @@ mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
1010{ 1010{
1011 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 1011 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
1012 1012
1013 mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->dev->dev_addr, 1013 mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->vif.addr,
1014 cpu_to_le32(++ifmsh->sn), 1014 cpu_to_le32(++ifmsh->sn),
1015 0, NULL, 0, broadcast_addr, 1015 0, NULL, 0, broadcast_addr,
1016 0, MESH_TTL, 0, 0, 0, sdata); 1016 0, MESH_TTL, 0, 0, 0, sdata);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 0192cfdacae..2312efe04c6 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -260,7 +260,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
260 int err = 0; 260 int err = 0;
261 u32 hash_idx; 261 u32 hash_idx;
262 262
263 if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0) 263 if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
264 /* never add ourselves as neighbours */ 264 /* never add ourselves as neighbours */
265 return -ENOTSUPP; 265 return -ENOTSUPP;
266 266
@@ -377,7 +377,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
377 int err = 0; 377 int err = 0;
378 u32 hash_idx; 378 u32 hash_idx;
379 379
380 if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0) 380 if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
381 /* never add ourselves as neighbours */ 381 /* never add ourselves as neighbours */
382 return -ENOTSUPP; 382 return -ENOTSUPP;
383 383
@@ -605,7 +605,7 @@ void mesh_path_discard_frame(struct sk_buff *skb,
605 struct mesh_path *mpath; 605 struct mesh_path *mpath;
606 u32 sn = 0; 606 u32 sn = 0;
607 607
608 if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) { 608 if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
609 u8 *ra, *da; 609 u8 *ra, *da;
610 610
611 da = hdr->addr3; 611 da = hdr->addr3;
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 0f7c6e6a424..bc4e20e57ff 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -102,7 +102,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
102 if (local->num_sta >= MESH_MAX_PLINKS) 102 if (local->num_sta >= MESH_MAX_PLINKS)
103 return NULL; 103 return NULL;
104 104
105 sta = sta_info_alloc(sdata, hw_addr, GFP_ATOMIC); 105 sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
106 if (!sta) 106 if (!sta)
107 return NULL; 107 return NULL;
108 108
@@ -169,7 +169,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
169 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 169 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
170 IEEE80211_STYPE_ACTION); 170 IEEE80211_STYPE_ACTION);
171 memcpy(mgmt->da, da, ETH_ALEN); 171 memcpy(mgmt->da, da, ETH_ALEN);
172 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 172 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
173 /* BSSID is left zeroed, wildcard value */ 173 /* BSSID is left zeroed, wildcard value */
174 mgmt->u.action.category = MESH_PLINK_CATEGORY; 174 mgmt->u.action.category = MESH_PLINK_CATEGORY;
175 mgmt->u.action.u.plink_action.action_code = action; 175 mgmt->u.action.u.plink_action.action_code = action;
@@ -234,14 +234,14 @@ void mesh_neighbour_update(u8 *hw_addr, u32 rates, struct ieee80211_sub_if_data
234 234
235 rcu_read_lock(); 235 rcu_read_lock();
236 236
237 sta = sta_info_get(local, hw_addr); 237 sta = sta_info_get(sdata, hw_addr);
238 if (!sta) { 238 if (!sta) {
239 rcu_read_unlock();
240
239 sta = mesh_plink_alloc(sdata, hw_addr, rates); 241 sta = mesh_plink_alloc(sdata, hw_addr, rates);
240 if (!sta) { 242 if (!sta)
241 rcu_read_unlock();
242 return; 243 return;
243 } 244 if (sta_info_insert_rcu(sta)) {
244 if (sta_info_insert(sta)) {
245 rcu_read_unlock(); 245 rcu_read_unlock();
246 return; 246 return;
247 } 247 }
@@ -455,7 +455,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
455 455
456 rcu_read_lock(); 456 rcu_read_lock();
457 457
458 sta = sta_info_get(local, mgmt->sa); 458 sta = sta_info_get(sdata, mgmt->sa);
459 if (!sta && ftype != PLINK_OPEN) { 459 if (!sta && ftype != PLINK_OPEN) {
460 mpl_dbg("Mesh plink: cls or cnf from unknown peer\n"); 460 mpl_dbg("Mesh plink: cls or cnf from unknown peer\n");
461 rcu_read_unlock(); 461 rcu_read_unlock();
@@ -485,9 +485,11 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
485 } else if (!sta) { 485 } else if (!sta) {
486 /* ftype == PLINK_OPEN */ 486 /* ftype == PLINK_OPEN */
487 u32 rates; 487 u32 rates;
488
489 rcu_read_unlock();
490
488 if (!mesh_plink_free_count(sdata)) { 491 if (!mesh_plink_free_count(sdata)) {
489 mpl_dbg("Mesh plink error: no more free plinks\n"); 492 mpl_dbg("Mesh plink error: no more free plinks\n");
490 rcu_read_unlock();
491 return; 493 return;
492 } 494 }
493 495
@@ -495,10 +497,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
495 sta = mesh_plink_alloc(sdata, mgmt->sa, rates); 497 sta = mesh_plink_alloc(sdata, mgmt->sa, rates);
496 if (!sta) { 498 if (!sta) {
497 mpl_dbg("Mesh plink error: plink table full\n"); 499 mpl_dbg("Mesh plink error: plink table full\n");
498 rcu_read_unlock();
499 return; 500 return;
500 } 501 }
501 if (sta_info_insert(sta)) { 502 if (sta_info_insert_rcu(sta)) {
502 rcu_read_unlock(); 503 rcu_read_unlock();
503 return; 504 return;
504 } 505 }
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 05a18f43e1b..bfc4a507001 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -27,10 +27,6 @@
27#include "rate.h" 27#include "rate.h"
28#include "led.h" 28#include "led.h"
29 29
30#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
31#define IEEE80211_AUTH_MAX_TRIES 3
32#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
33#define IEEE80211_ASSOC_MAX_TRIES 3
34#define IEEE80211_MAX_PROBE_TRIES 5 30#define IEEE80211_MAX_PROBE_TRIES 5
35 31
36/* 32/*
@@ -75,11 +71,8 @@ enum rx_mgmt_action {
75 /* caller must call cfg80211_send_disassoc() */ 71 /* caller must call cfg80211_send_disassoc() */
76 RX_MGMT_CFG80211_DISASSOC, 72 RX_MGMT_CFG80211_DISASSOC,
77 73
78 /* caller must call cfg80211_auth_timeout() & free work */ 74 /* caller must tell cfg80211 about internal error */
79 RX_MGMT_CFG80211_AUTH_TO, 75 RX_MGMT_CFG80211_ASSOC_ERROR,
80
81 /* caller must call cfg80211_assoc_timeout() & free work */
82 RX_MGMT_CFG80211_ASSOC_TO,
83}; 76};
84 77
85/* utils */ 78/* utils */
@@ -122,27 +115,6 @@ static int ecw2cw(int ecw)
122 return (1 << ecw) - 1; 115 return (1 << ecw) - 1;
123} 116}
124 117
125static int ieee80211_compatible_rates(struct ieee80211_bss *bss,
126 struct ieee80211_supported_band *sband,
127 u32 *rates)
128{
129 int i, j, count;
130 *rates = 0;
131 count = 0;
132 for (i = 0; i < bss->supp_rates_len; i++) {
133 int rate = (bss->supp_rates[i] & 0x7F) * 5;
134
135 for (j = 0; j < sband->n_bitrates; j++)
136 if (sband->bitrates[j].bitrate == rate) {
137 *rates |= BIT(j);
138 count++;
139 break;
140 }
141 }
142
143 return count;
144}
145
146/* 118/*
147 * ieee80211_enable_ht should be called only after the operating band 119 * ieee80211_enable_ht should be called only after the operating band
148 * has been determined as ht configuration depends on the hw's 120 * has been determined as ht configuration depends on the hw's
@@ -202,7 +174,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
202 ieee80211_hw_config(local, 0); 174 ieee80211_hw_config(local, 0);
203 175
204 rcu_read_lock(); 176 rcu_read_lock();
205 sta = sta_info_get(local, bssid); 177 sta = sta_info_get(sdata, bssid);
206 if (sta) 178 if (sta)
207 rate_control_rate_update(local, sband, sta, 179 rate_control_rate_update(local, sband, sta,
208 IEEE80211_RC_HT_CHANGED); 180 IEEE80211_RC_HT_CHANGED);
@@ -228,209 +200,6 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
228 200
229/* frame sending functions */ 201/* frame sending functions */
230 202
231static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
232 struct ieee80211_mgd_work *wk)
233{
234 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
235 struct ieee80211_local *local = sdata->local;
236 struct sk_buff *skb;
237 struct ieee80211_mgmt *mgmt;
238 u8 *pos;
239 const u8 *ies, *ht_ie;
240 int i, len, count, rates_len, supp_rates_len;
241 u16 capab;
242 int wmm = 0;
243 struct ieee80211_supported_band *sband;
244 u32 rates = 0;
245
246 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
247 sizeof(*mgmt) + 200 + wk->ie_len +
248 wk->ssid_len);
249 if (!skb) {
250 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
251 "frame\n", sdata->dev->name);
252 return;
253 }
254 skb_reserve(skb, local->hw.extra_tx_headroom);
255
256 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
257
258 capab = ifmgd->capab;
259
260 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) {
261 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
262 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
263 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
264 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
265 }
266
267 if (wk->bss->cbss.capability & WLAN_CAPABILITY_PRIVACY)
268 capab |= WLAN_CAPABILITY_PRIVACY;
269 if (wk->bss->wmm_used)
270 wmm = 1;
271
272 /* get all rates supported by the device and the AP as
273 * some APs don't like getting a superset of their rates
274 * in the association request (e.g. D-Link DAP 1353 in
275 * b-only mode) */
276 rates_len = ieee80211_compatible_rates(wk->bss, sband, &rates);
277
278 if ((wk->bss->cbss.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
279 (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
280 capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
281
282 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
283 memset(mgmt, 0, 24);
284 memcpy(mgmt->da, wk->bss->cbss.bssid, ETH_ALEN);
285 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
286 memcpy(mgmt->bssid, wk->bss->cbss.bssid, ETH_ALEN);
287
288 if (!is_zero_ether_addr(wk->prev_bssid)) {
289 skb_put(skb, 10);
290 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
291 IEEE80211_STYPE_REASSOC_REQ);
292 mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
293 mgmt->u.reassoc_req.listen_interval =
294 cpu_to_le16(local->hw.conf.listen_interval);
295 memcpy(mgmt->u.reassoc_req.current_ap, wk->prev_bssid,
296 ETH_ALEN);
297 } else {
298 skb_put(skb, 4);
299 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
300 IEEE80211_STYPE_ASSOC_REQ);
301 mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
302 mgmt->u.assoc_req.listen_interval =
303 cpu_to_le16(local->hw.conf.listen_interval);
304 }
305
306 /* SSID */
307 ies = pos = skb_put(skb, 2 + wk->ssid_len);
308 *pos++ = WLAN_EID_SSID;
309 *pos++ = wk->ssid_len;
310 memcpy(pos, wk->ssid, wk->ssid_len);
311
312 /* add all rates which were marked to be used above */
313 supp_rates_len = rates_len;
314 if (supp_rates_len > 8)
315 supp_rates_len = 8;
316
317 len = sband->n_bitrates;
318 pos = skb_put(skb, supp_rates_len + 2);
319 *pos++ = WLAN_EID_SUPP_RATES;
320 *pos++ = supp_rates_len;
321
322 count = 0;
323 for (i = 0; i < sband->n_bitrates; i++) {
324 if (BIT(i) & rates) {
325 int rate = sband->bitrates[i].bitrate;
326 *pos++ = (u8) (rate / 5);
327 if (++count == 8)
328 break;
329 }
330 }
331
332 if (rates_len > count) {
333 pos = skb_put(skb, rates_len - count + 2);
334 *pos++ = WLAN_EID_EXT_SUPP_RATES;
335 *pos++ = rates_len - count;
336
337 for (i++; i < sband->n_bitrates; i++) {
338 if (BIT(i) & rates) {
339 int rate = sband->bitrates[i].bitrate;
340 *pos++ = (u8) (rate / 5);
341 }
342 }
343 }
344
345 if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
346 /* 1. power capabilities */
347 pos = skb_put(skb, 4);
348 *pos++ = WLAN_EID_PWR_CAPABILITY;
349 *pos++ = 2;
350 *pos++ = 0; /* min tx power */
351 *pos++ = local->hw.conf.channel->max_power; /* max tx power */
352
353 /* 2. supported channels */
354 /* TODO: get this in reg domain format */
355 pos = skb_put(skb, 2 * sband->n_channels + 2);
356 *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
357 *pos++ = 2 * sband->n_channels;
358 for (i = 0; i < sband->n_channels; i++) {
359 *pos++ = ieee80211_frequency_to_channel(
360 sband->channels[i].center_freq);
361 *pos++ = 1; /* one channel in the subband*/
362 }
363 }
364
365 if (wk->ie_len && wk->ie) {
366 pos = skb_put(skb, wk->ie_len);
367 memcpy(pos, wk->ie, wk->ie_len);
368 }
369
370 if (wmm && (ifmgd->flags & IEEE80211_STA_WMM_ENABLED)) {
371 pos = skb_put(skb, 9);
372 *pos++ = WLAN_EID_VENDOR_SPECIFIC;
373 *pos++ = 7; /* len */
374 *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
375 *pos++ = 0x50;
376 *pos++ = 0xf2;
377 *pos++ = 2; /* WME */
378 *pos++ = 0; /* WME info */
379 *pos++ = 1; /* WME ver */
380 *pos++ = 0;
381 }
382
383 /* wmm support is a must to HT */
384 /*
385 * IEEE802.11n does not allow TKIP/WEP as pairwise
386 * ciphers in HT mode. We still associate in non-ht
387 * mode (11a/b/g) if any one of these ciphers is
388 * configured as pairwise.
389 */
390 if (wmm && (ifmgd->flags & IEEE80211_STA_WMM_ENABLED) &&
391 sband->ht_cap.ht_supported &&
392 (ht_ie = ieee80211_bss_get_ie(&wk->bss->cbss, WLAN_EID_HT_INFORMATION)) &&
393 ht_ie[1] >= sizeof(struct ieee80211_ht_info) &&
394 (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))) {
395 struct ieee80211_ht_info *ht_info =
396 (struct ieee80211_ht_info *)(ht_ie + 2);
397 u16 cap = sband->ht_cap.cap;
398 __le16 tmp;
399 u32 flags = local->hw.conf.channel->flags;
400
401 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
402 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
403 if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
404 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
405 cap &= ~IEEE80211_HT_CAP_SGI_40;
406 }
407 break;
408 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
409 if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
410 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
411 cap &= ~IEEE80211_HT_CAP_SGI_40;
412 }
413 break;
414 }
415
416 tmp = cpu_to_le16(cap);
417 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap)+2);
418 *pos++ = WLAN_EID_HT_CAPABILITY;
419 *pos++ = sizeof(struct ieee80211_ht_cap);
420 memset(pos, 0, sizeof(struct ieee80211_ht_cap));
421 memcpy(pos, &tmp, sizeof(u16));
422 pos += sizeof(u16);
423 /* TODO: needs a define here for << 2 */
424 *pos++ = sband->ht_cap.ampdu_factor |
425 (sband->ht_cap.ampdu_density << 2);
426 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
427 }
428
429 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
430 ieee80211_tx_skb(sdata, skb);
431}
432
433
434static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, 203static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
435 const u8 *bssid, u16 stype, u16 reason, 204 const u8 *bssid, u16 stype, u16 reason,
436 void *cookie) 205 void *cookie)
@@ -443,7 +212,7 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
443 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); 212 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt));
444 if (!skb) { 213 if (!skb) {
445 printk(KERN_DEBUG "%s: failed to allocate buffer for " 214 printk(KERN_DEBUG "%s: failed to allocate buffer for "
446 "deauth/disassoc frame\n", sdata->dev->name); 215 "deauth/disassoc frame\n", sdata->name);
447 return; 216 return;
448 } 217 }
449 skb_reserve(skb, local->hw.extra_tx_headroom); 218 skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -451,7 +220,7 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
451 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 220 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
452 memset(mgmt, 0, 24); 221 memset(mgmt, 0, 24);
453 memcpy(mgmt->da, bssid, ETH_ALEN); 222 memcpy(mgmt->da, bssid, ETH_ALEN);
454 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 223 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
455 memcpy(mgmt->bssid, bssid, ETH_ALEN); 224 memcpy(mgmt->bssid, bssid, ETH_ALEN);
456 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype); 225 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
457 skb_put(skb, 2); 226 skb_put(skb, 2);
@@ -476,30 +245,15 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
476void ieee80211_send_pspoll(struct ieee80211_local *local, 245void ieee80211_send_pspoll(struct ieee80211_local *local,
477 struct ieee80211_sub_if_data *sdata) 246 struct ieee80211_sub_if_data *sdata)
478{ 247{
479 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
480 struct ieee80211_pspoll *pspoll; 248 struct ieee80211_pspoll *pspoll;
481 struct sk_buff *skb; 249 struct sk_buff *skb;
482 u16 fc;
483 250
484 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll)); 251 skb = ieee80211_pspoll_get(&local->hw, &sdata->vif);
485 if (!skb) { 252 if (!skb)
486 printk(KERN_DEBUG "%s: failed to allocate buffer for "
487 "pspoll frame\n", sdata->dev->name);
488 return; 253 return;
489 }
490 skb_reserve(skb, local->hw.extra_tx_headroom);
491
492 pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll));
493 memset(pspoll, 0, sizeof(*pspoll));
494 fc = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL | IEEE80211_FCTL_PM;
495 pspoll->frame_control = cpu_to_le16(fc);
496 pspoll->aid = cpu_to_le16(ifmgd->aid);
497
498 /* aid in PS-Poll has its two MSBs each set to 1 */
499 pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);
500 254
501 memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN); 255 pspoll = (struct ieee80211_pspoll *) skb->data;
502 memcpy(pspoll->ta, sdata->dev->dev_addr, ETH_ALEN); 256 pspoll->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
503 257
504 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 258 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
505 ieee80211_tx_skb(sdata, skb); 259 ieee80211_tx_skb(sdata, skb);
@@ -510,30 +264,47 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
510 int powersave) 264 int powersave)
511{ 265{
512 struct sk_buff *skb; 266 struct sk_buff *skb;
267 struct ieee80211_hdr_3addr *nullfunc;
268
269 skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif);
270 if (!skb)
271 return;
272
273 nullfunc = (struct ieee80211_hdr_3addr *) skb->data;
274 if (powersave)
275 nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
276
277 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
278 ieee80211_tx_skb(sdata, skb);
279}
280
281static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
282 struct ieee80211_sub_if_data *sdata)
283{
284 struct sk_buff *skb;
513 struct ieee80211_hdr *nullfunc; 285 struct ieee80211_hdr *nullfunc;
514 __le16 fc; 286 __le16 fc;
515 287
516 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) 288 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
517 return; 289 return;
518 290
519 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24); 291 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30);
520 if (!skb) { 292 if (!skb) {
521 printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc " 293 printk(KERN_DEBUG "%s: failed to allocate buffer for 4addr "
522 "frame\n", sdata->dev->name); 294 "nullfunc frame\n", sdata->name);
523 return; 295 return;
524 } 296 }
525 skb_reserve(skb, local->hw.extra_tx_headroom); 297 skb_reserve(skb, local->hw.extra_tx_headroom);
526 298
527 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24); 299 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 30);
528 memset(nullfunc, 0, 24); 300 memset(nullfunc, 0, 30);
529 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | 301 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
530 IEEE80211_FCTL_TODS); 302 IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
531 if (powersave)
532 fc |= cpu_to_le16(IEEE80211_FCTL_PM);
533 nullfunc->frame_control = fc; 303 nullfunc->frame_control = fc;
534 memcpy(nullfunc->addr1, sdata->u.mgd.bssid, ETH_ALEN); 304 memcpy(nullfunc->addr1, sdata->u.mgd.bssid, ETH_ALEN);
535 memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN); 305 memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
536 memcpy(nullfunc->addr3, sdata->u.mgd.bssid, ETH_ALEN); 306 memcpy(nullfunc->addr3, sdata->u.mgd.bssid, ETH_ALEN);
307 memcpy(nullfunc->addr4, sdata->vif.addr, ETH_ALEN);
537 308
538 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 309 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
539 ieee80211_tx_skb(sdata, skb); 310 ieee80211_tx_skb(sdata, skb);
@@ -546,7 +317,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
546 container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work); 317 container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work);
547 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 318 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
548 319
549 if (!netif_running(sdata->dev)) 320 if (!ieee80211_sdata_running(sdata))
550 return; 321 return;
551 322
552 mutex_lock(&ifmgd->mtx); 323 mutex_lock(&ifmgd->mtx);
@@ -557,7 +328,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
557 ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL); 328 ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL);
558 329
559 /* XXX: shouldn't really modify cfg80211-owned data! */ 330 /* XXX: shouldn't really modify cfg80211-owned data! */
560 ifmgd->associated->cbss.channel = sdata->local->oper_channel; 331 ifmgd->associated->channel = sdata->local->oper_channel;
561 332
562 ieee80211_wake_queues_by_reason(&sdata->local->hw, 333 ieee80211_wake_queues_by_reason(&sdata->local->hw,
563 IEEE80211_QUEUE_STOP_REASON_CSA); 334 IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -584,6 +355,8 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
584 struct ieee80211_channel_sw_ie *sw_elem, 355 struct ieee80211_channel_sw_ie *sw_elem,
585 struct ieee80211_bss *bss) 356 struct ieee80211_bss *bss)
586{ 357{
358 struct cfg80211_bss *cbss =
359 container_of((void *)bss, struct cfg80211_bss, priv);
587 struct ieee80211_channel *new_ch; 360 struct ieee80211_channel *new_ch;
588 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 361 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
589 int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num); 362 int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num);
@@ -617,7 +390,7 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
617 mod_timer(&ifmgd->chswitch_timer, 390 mod_timer(&ifmgd->chswitch_timer,
618 jiffies + 391 jiffies +
619 msecs_to_jiffies(sw_elem->count * 392 msecs_to_jiffies(sw_elem->count *
620 bss->cbss.beacon_interval)); 393 cbss->beacon_interval));
621 } 394 }
622} 395}
623 396
@@ -661,8 +434,11 @@ static void ieee80211_enable_ps(struct ieee80211_local *local,
661 } else { 434 } else {
662 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) 435 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
663 ieee80211_send_nullfunc(local, sdata, 1); 436 ieee80211_send_nullfunc(local, sdata, 1);
664 conf->flags |= IEEE80211_CONF_PS; 437
665 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); 438 if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
439 conf->flags |= IEEE80211_CONF_PS;
440 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
441 }
666 } 442 }
667} 443}
668 444
@@ -691,8 +467,13 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
691 return; 467 return;
692 } 468 }
693 469
470 if (!list_empty(&local->work_list)) {
471 local->ps_sdata = NULL;
472 goto change;
473 }
474
694 list_for_each_entry(sdata, &local->interfaces, list) { 475 list_for_each_entry(sdata, &local->interfaces, list) {
695 if (!netif_running(sdata->dev)) 476 if (!ieee80211_sdata_running(sdata))
696 continue; 477 continue;
697 if (sdata->vif.type != NL80211_IFTYPE_STATION) 478 if (sdata->vif.type != NL80211_IFTYPE_STATION)
698 continue; 479 continue;
@@ -701,7 +482,8 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
701 } 482 }
702 483
703 if (count == 1 && found->u.mgd.powersave && 484 if (count == 1 && found->u.mgd.powersave &&
704 found->u.mgd.associated && list_empty(&found->u.mgd.work_list) && 485 found->u.mgd.associated &&
486 found->u.mgd.associated->beacon_ies &&
705 !(found->u.mgd.flags & (IEEE80211_STA_BEACON_POLL | 487 !(found->u.mgd.flags & (IEEE80211_STA_BEACON_POLL |
706 IEEE80211_STA_CONNECTION_POLL))) { 488 IEEE80211_STA_CONNECTION_POLL))) {
707 s32 beaconint_us; 489 s32 beaconint_us;
@@ -715,20 +497,29 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
715 if (beaconint_us > latency) { 497 if (beaconint_us > latency) {
716 local->ps_sdata = NULL; 498 local->ps_sdata = NULL;
717 } else { 499 } else {
718 u8 dtimper = found->vif.bss_conf.dtim_period; 500 struct ieee80211_bss *bss;
719 int maxslp = 1; 501 int maxslp = 1;
502 u8 dtimper;
720 503
721 if (dtimper > 1) 504 bss = (void *)found->u.mgd.associated->priv;
505 dtimper = bss->dtim_period;
506
507 /* If the TIM IE is invalid, pretend the value is 1 */
508 if (!dtimper)
509 dtimper = 1;
510 else if (dtimper > 1)
722 maxslp = min_t(int, dtimper, 511 maxslp = min_t(int, dtimper,
723 latency / beaconint_us); 512 latency / beaconint_us);
724 513
725 local->hw.conf.max_sleep_period = maxslp; 514 local->hw.conf.max_sleep_period = maxslp;
515 local->hw.conf.ps_dtim_period = dtimper;
726 local->ps_sdata = found; 516 local->ps_sdata = found;
727 } 517 }
728 } else { 518 } else {
729 local->ps_sdata = NULL; 519 local->ps_sdata = NULL;
730 } 520 }
731 521
522 change:
732 ieee80211_change_ps(local); 523 ieee80211_change_ps(local);
733} 524}
734 525
@@ -753,6 +544,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
753 container_of(work, struct ieee80211_local, 544 container_of(work, struct ieee80211_local,
754 dynamic_ps_enable_work); 545 dynamic_ps_enable_work);
755 struct ieee80211_sub_if_data *sdata = local->ps_sdata; 546 struct ieee80211_sub_if_data *sdata = local->ps_sdata;
547 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
756 548
757 /* can only happen when PS was just disabled anyway */ 549 /* can only happen when PS was just disabled anyway */
758 if (!sdata) 550 if (!sdata)
@@ -761,11 +553,16 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
761 if (local->hw.conf.flags & IEEE80211_CONF_PS) 553 if (local->hw.conf.flags & IEEE80211_CONF_PS)
762 return; 554 return;
763 555
764 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) 556 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
557 (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)))
765 ieee80211_send_nullfunc(local, sdata, 1); 558 ieee80211_send_nullfunc(local, sdata, 1);
766 559
767 local->hw.conf.flags |= IEEE80211_CONF_PS; 560 if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) ||
768 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); 561 (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
562 ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
563 local->hw.conf.flags |= IEEE80211_CONF_PS;
564 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
565 }
769} 566}
770 567
771void ieee80211_dynamic_ps_timer(unsigned long data) 568void ieee80211_dynamic_ps_timer(unsigned long data)
@@ -786,9 +583,9 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
786 struct ieee80211_tx_queue_params params; 583 struct ieee80211_tx_queue_params params;
787 size_t left; 584 size_t left;
788 int count; 585 int count;
789 u8 *pos; 586 u8 *pos, uapsd_queues = 0;
790 587
791 if (!(ifmgd->flags & IEEE80211_STA_WMM_ENABLED)) 588 if (local->hw.queues < 4)
792 return; 589 return;
793 590
794 if (!wmm_param) 591 if (!wmm_param)
@@ -796,6 +593,10 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
796 593
797 if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1) 594 if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
798 return; 595 return;
596
597 if (ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
598 uapsd_queues = local->uapsd_queues;
599
799 count = wmm_param[6] & 0x0f; 600 count = wmm_param[6] & 0x0f;
800 if (count == ifmgd->wmm_last_param_set) 601 if (count == ifmgd->wmm_last_param_set)
801 return; 602 return;
@@ -810,6 +611,7 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
810 for (; left >= 4; left -= 4, pos += 4) { 611 for (; left >= 4; left -= 4, pos += 4) {
811 int aci = (pos[0] >> 5) & 0x03; 612 int aci = (pos[0] >> 5) & 0x03;
812 int acm = (pos[0] >> 4) & 0x01; 613 int acm = (pos[0] >> 4) & 0x01;
614 bool uapsd = false;
813 int queue; 615 int queue;
814 616
815 switch (aci) { 617 switch (aci) {
@@ -817,22 +619,30 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
817 queue = 3; 619 queue = 3;
818 if (acm) 620 if (acm)
819 local->wmm_acm |= BIT(1) | BIT(2); /* BK/- */ 621 local->wmm_acm |= BIT(1) | BIT(2); /* BK/- */
622 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
623 uapsd = true;
820 break; 624 break;
821 case 2: /* AC_VI */ 625 case 2: /* AC_VI */
822 queue = 1; 626 queue = 1;
823 if (acm) 627 if (acm)
824 local->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */ 628 local->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */
629 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
630 uapsd = true;
825 break; 631 break;
826 case 3: /* AC_VO */ 632 case 3: /* AC_VO */
827 queue = 0; 633 queue = 0;
828 if (acm) 634 if (acm)
829 local->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */ 635 local->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */
636 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
637 uapsd = true;
830 break; 638 break;
831 case 0: /* AC_BE */ 639 case 0: /* AC_BE */
832 default: 640 default:
833 queue = 2; 641 queue = 2;
834 if (acm) 642 if (acm)
835 local->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */ 643 local->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */
644 if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
645 uapsd = true;
836 break; 646 break;
837 } 647 }
838 648
@@ -840,11 +650,14 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
840 params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4); 650 params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4);
841 params.cw_min = ecw2cw(pos[1] & 0x0f); 651 params.cw_min = ecw2cw(pos[1] & 0x0f);
842 params.txop = get_unaligned_le16(pos + 2); 652 params.txop = get_unaligned_le16(pos + 2);
653 params.uapsd = uapsd;
654
843#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 655#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
844 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " 656 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d "
845 "cWmin=%d cWmax=%d txop=%d\n", 657 "cWmin=%d cWmax=%d txop=%d uapsd=%d\n",
846 wiphy_name(local->hw.wiphy), queue, aci, acm, 658 wiphy_name(local->hw.wiphy), queue, aci, acm,
847 params.aifs, params.cw_min, params.cw_max, params.txop); 659 params.aifs, params.cw_min, params.cw_max, params.txop,
660 params.uapsd);
848#endif 661#endif
849 if (drv_conf_tx(local, queue, &params) && local->ops->conf_tx) 662 if (drv_conf_tx(local, queue, &params) && local->ops->conf_tx)
850 printk(KERN_DEBUG "%s: failed to set TX queue " 663 printk(KERN_DEBUG "%s: failed to set TX queue "
@@ -871,6 +684,8 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
871 } 684 }
872 685
873 use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME); 686 use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
687 if (sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ)
688 use_short_slot = true;
874 689
875 if (use_protection != bss_conf->use_cts_prot) { 690 if (use_protection != bss_conf->use_cts_prot) {
876 bss_conf->use_cts_prot = use_protection; 691 bss_conf->use_cts_prot = use_protection;
@@ -891,25 +706,23 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
891} 706}
892 707
893static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, 708static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
894 struct ieee80211_mgd_work *wk, 709 struct cfg80211_bss *cbss,
895 u32 bss_info_changed) 710 u32 bss_info_changed)
896{ 711{
712 struct ieee80211_bss *bss = (void *)cbss->priv;
897 struct ieee80211_local *local = sdata->local; 713 struct ieee80211_local *local = sdata->local;
898 struct ieee80211_bss *bss = wk->bss;
899 714
900 bss_info_changed |= BSS_CHANGED_ASSOC; 715 bss_info_changed |= BSS_CHANGED_ASSOC;
901 /* set timing information */ 716 /* set timing information */
902 sdata->vif.bss_conf.beacon_int = bss->cbss.beacon_interval; 717 sdata->vif.bss_conf.beacon_int = cbss->beacon_interval;
903 sdata->vif.bss_conf.timestamp = bss->cbss.tsf; 718 sdata->vif.bss_conf.timestamp = cbss->tsf;
904 sdata->vif.bss_conf.dtim_period = bss->dtim_period;
905 719
906 bss_info_changed |= BSS_CHANGED_BEACON_INT; 720 bss_info_changed |= BSS_CHANGED_BEACON_INT;
907 bss_info_changed |= ieee80211_handle_bss_capability(sdata, 721 bss_info_changed |= ieee80211_handle_bss_capability(sdata,
908 bss->cbss.capability, bss->has_erp_value, bss->erp_value); 722 cbss->capability, bss->has_erp_value, bss->erp_value);
909 723
910 sdata->u.mgd.associated = bss; 724 sdata->u.mgd.associated = cbss;
911 sdata->u.mgd.old_associate_work = wk; 725 memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN);
912 memcpy(sdata->u.mgd.bssid, bss->cbss.bssid, ETH_ALEN);
913 726
914 /* just to be sure */ 727 /* just to be sure */
915 sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | 728 sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
@@ -940,99 +753,14 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
940 753
941 mutex_lock(&local->iflist_mtx); 754 mutex_lock(&local->iflist_mtx);
942 ieee80211_recalc_ps(local, -1); 755 ieee80211_recalc_ps(local, -1);
756 ieee80211_recalc_smps(local, sdata);
943 mutex_unlock(&local->iflist_mtx); 757 mutex_unlock(&local->iflist_mtx);
944 758
945 netif_tx_start_all_queues(sdata->dev); 759 netif_tx_start_all_queues(sdata->dev);
946 netif_carrier_on(sdata->dev); 760 netif_carrier_on(sdata->dev);
947} 761}
948 762
949static enum rx_mgmt_action __must_check 763static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata)
950ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata,
951 struct ieee80211_mgd_work *wk)
952{
953 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
954 struct ieee80211_local *local = sdata->local;
955
956 wk->tries++;
957 if (wk->tries > IEEE80211_AUTH_MAX_TRIES) {
958 printk(KERN_DEBUG "%s: direct probe to AP %pM timed out\n",
959 sdata->dev->name, wk->bss->cbss.bssid);
960
961 /*
962 * Most likely AP is not in the range so remove the
963 * bss struct for that AP.
964 */
965 cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss);
966
967 /*
968 * We might have a pending scan which had no chance to run yet
969 * due to work needing to be done. Hence, queue the STAs work
970 * again for that.
971 */
972 ieee80211_queue_work(&local->hw, &ifmgd->work);
973 return RX_MGMT_CFG80211_AUTH_TO;
974 }
975
976 printk(KERN_DEBUG "%s: direct probe to AP %pM (try %d)\n",
977 sdata->dev->name, wk->bss->cbss.bssid,
978 wk->tries);
979
980 /*
981 * Direct probe is sent to broadcast address as some APs
982 * will not answer to direct packet in unassociated state.
983 */
984 ieee80211_send_probe_req(sdata, NULL, wk->ssid, wk->ssid_len, NULL, 0);
985
986 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
987 run_again(ifmgd, wk->timeout);
988
989 return RX_MGMT_NONE;
990}
991
992
993static enum rx_mgmt_action __must_check
994ieee80211_authenticate(struct ieee80211_sub_if_data *sdata,
995 struct ieee80211_mgd_work *wk)
996{
997 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
998 struct ieee80211_local *local = sdata->local;
999
1000 wk->tries++;
1001 if (wk->tries > IEEE80211_AUTH_MAX_TRIES) {
1002 printk(KERN_DEBUG "%s: authentication with AP %pM"
1003 " timed out\n",
1004 sdata->dev->name, wk->bss->cbss.bssid);
1005
1006 /*
1007 * Most likely AP is not in the range so remove the
1008 * bss struct for that AP.
1009 */
1010 cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss);
1011
1012 /*
1013 * We might have a pending scan which had no chance to run yet
1014 * due to work needing to be done. Hence, queue the STAs work
1015 * again for that.
1016 */
1017 ieee80211_queue_work(&local->hw, &ifmgd->work);
1018 return RX_MGMT_CFG80211_AUTH_TO;
1019 }
1020
1021 printk(KERN_DEBUG "%s: authenticate with AP %pM (try %d)\n",
1022 sdata->dev->name, wk->bss->cbss.bssid, wk->tries);
1023
1024 ieee80211_send_auth(sdata, 1, wk->auth_alg, wk->ie, wk->ie_len,
1025 wk->bss->cbss.bssid, NULL, 0, 0);
1026 wk->auth_transaction = 2;
1027
1028 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
1029 run_again(ifmgd, wk->timeout);
1030
1031 return RX_MGMT_NONE;
1032}
1033
1034static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1035 bool deauth)
1036{ 764{
1037 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 765 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1038 struct ieee80211_local *local = sdata->local; 766 struct ieee80211_local *local = sdata->local;
@@ -1045,21 +773,11 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1045 if (WARN_ON(!ifmgd->associated)) 773 if (WARN_ON(!ifmgd->associated))
1046 return; 774 return;
1047 775
1048 memcpy(bssid, ifmgd->associated->cbss.bssid, ETH_ALEN); 776 memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
1049 777
1050 ifmgd->associated = NULL; 778 ifmgd->associated = NULL;
1051 memset(ifmgd->bssid, 0, ETH_ALEN); 779 memset(ifmgd->bssid, 0, ETH_ALEN);
1052 780
1053 if (deauth) {
1054 kfree(ifmgd->old_associate_work);
1055 ifmgd->old_associate_work = NULL;
1056 } else {
1057 struct ieee80211_mgd_work *wk = ifmgd->old_associate_work;
1058
1059 wk->state = IEEE80211_MGD_STATE_IDLE;
1060 list_add(&wk->list, &ifmgd->work_list);
1061 }
1062
1063 /* 781 /*
1064 * we need to commit the associated = NULL change because the 782 * we need to commit the associated = NULL change because the
1065 * scan code uses that to determine whether this iface should 783 * scan code uses that to determine whether this iface should
@@ -1078,9 +796,11 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1078 netif_carrier_off(sdata->dev); 796 netif_carrier_off(sdata->dev);
1079 797
1080 rcu_read_lock(); 798 rcu_read_lock();
1081 sta = sta_info_get(local, bssid); 799 sta = sta_info_get(sdata, bssid);
1082 if (sta) 800 if (sta) {
801 set_sta_flags(sta, WLAN_STA_DISASSOC);
1083 ieee80211_sta_tear_down_BA_sessions(sta); 802 ieee80211_sta_tear_down_BA_sessions(sta);
803 }
1084 rcu_read_unlock(); 804 rcu_read_unlock();
1085 805
1086 changed |= ieee80211_reset_erp_info(sdata); 806 changed |= ieee80211_reset_erp_info(sdata);
@@ -1113,57 +833,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1113 changed |= BSS_CHANGED_BSSID; 833 changed |= BSS_CHANGED_BSSID;
1114 ieee80211_bss_info_change_notify(sdata, changed); 834 ieee80211_bss_info_change_notify(sdata, changed);
1115 835
1116 rcu_read_lock(); 836 sta_info_destroy_addr(sdata, bssid);
1117
1118 sta = sta_info_get(local, bssid);
1119 if (!sta) {
1120 rcu_read_unlock();
1121 return;
1122 }
1123
1124 sta_info_unlink(&sta);
1125
1126 rcu_read_unlock();
1127
1128 sta_info_destroy(sta);
1129}
1130
1131static enum rx_mgmt_action __must_check
1132ieee80211_associate(struct ieee80211_sub_if_data *sdata,
1133 struct ieee80211_mgd_work *wk)
1134{
1135 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1136 struct ieee80211_local *local = sdata->local;
1137
1138 wk->tries++;
1139 if (wk->tries > IEEE80211_ASSOC_MAX_TRIES) {
1140 printk(KERN_DEBUG "%s: association with AP %pM"
1141 " timed out\n",
1142 sdata->dev->name, wk->bss->cbss.bssid);
1143
1144 /*
1145 * Most likely AP is not in the range so remove the
1146 * bss struct for that AP.
1147 */
1148 cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss);
1149
1150 /*
1151 * We might have a pending scan which had no chance to run yet
1152 * due to work needing to be done. Hence, queue the STAs work
1153 * again for that.
1154 */
1155 ieee80211_queue_work(&local->hw, &ifmgd->work);
1156 return RX_MGMT_CFG80211_ASSOC_TO;
1157 }
1158
1159 printk(KERN_DEBUG "%s: associate with AP %pM (try %d)\n",
1160 sdata->dev->name, wk->bss->cbss.bssid, wk->tries);
1161 ieee80211_send_assoc(sdata, wk);
1162
1163 wk->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
1164 run_again(ifmgd, wk->timeout);
1165
1166 return RX_MGMT_NONE;
1167} 837}
1168 838
1169void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, 839void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
@@ -1189,8 +859,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
1189 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 859 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1190 const u8 *ssid; 860 const u8 *ssid;
1191 861
1192 ssid = ieee80211_bss_get_ie(&ifmgd->associated->cbss, WLAN_EID_SSID); 862 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
1193 ieee80211_send_probe_req(sdata, ifmgd->associated->cbss.bssid, 863 ieee80211_send_probe_req(sdata, ifmgd->associated->bssid,
1194 ssid + 2, ssid[1], NULL, 0); 864 ssid + 2, ssid[1], NULL, 0);
1195 865
1196 ifmgd->probe_send_count++; 866 ifmgd->probe_send_count++;
@@ -1204,12 +874,15 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
1204 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 874 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1205 bool already = false; 875 bool already = false;
1206 876
1207 if (!netif_running(sdata->dev)) 877 if (!ieee80211_sdata_running(sdata))
1208 return; 878 return;
1209 879
1210 if (sdata->local->scanning) 880 if (sdata->local->scanning)
1211 return; 881 return;
1212 882
883 if (sdata->local->tmp_channel)
884 return;
885
1213 mutex_lock(&ifmgd->mtx); 886 mutex_lock(&ifmgd->mtx);
1214 887
1215 if (!ifmgd->associated) 888 if (!ifmgd->associated)
@@ -1218,7 +891,7 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
1218#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 891#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1219 if (beacon && net_ratelimit()) 892 if (beacon && net_ratelimit())
1220 printk(KERN_DEBUG "%s: detected beacon loss from AP " 893 printk(KERN_DEBUG "%s: detected beacon loss from AP "
1221 "- sending probe request\n", sdata->dev->name); 894 "- sending probe request\n", sdata->name);
1222#endif 895#endif
1223 896
1224 /* 897 /*
@@ -1271,88 +944,8 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif)
1271} 944}
1272EXPORT_SYMBOL(ieee80211_beacon_loss); 945EXPORT_SYMBOL(ieee80211_beacon_loss);
1273 946
1274static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata,
1275 struct ieee80211_mgd_work *wk)
1276{
1277 wk->state = IEEE80211_MGD_STATE_IDLE;
1278 printk(KERN_DEBUG "%s: authenticated\n", sdata->dev->name);
1279}
1280
1281
1282static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
1283 struct ieee80211_mgd_work *wk,
1284 struct ieee80211_mgmt *mgmt,
1285 size_t len)
1286{
1287 u8 *pos;
1288 struct ieee802_11_elems elems;
1289
1290 pos = mgmt->u.auth.variable;
1291 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1292 if (!elems.challenge)
1293 return;
1294 ieee80211_send_auth(sdata, 3, wk->auth_alg,
1295 elems.challenge - 2, elems.challenge_len + 2,
1296 wk->bss->cbss.bssid,
1297 wk->key, wk->key_len, wk->key_idx);
1298 wk->auth_transaction = 4;
1299}
1300
1301static enum rx_mgmt_action __must_check
1302ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1303 struct ieee80211_mgd_work *wk,
1304 struct ieee80211_mgmt *mgmt, size_t len)
1305{
1306 u16 auth_alg, auth_transaction, status_code;
1307
1308 if (wk->state != IEEE80211_MGD_STATE_AUTH)
1309 return RX_MGMT_NONE;
1310
1311 if (len < 24 + 6)
1312 return RX_MGMT_NONE;
1313
1314 if (memcmp(wk->bss->cbss.bssid, mgmt->sa, ETH_ALEN) != 0)
1315 return RX_MGMT_NONE;
1316
1317 if (memcmp(wk->bss->cbss.bssid, mgmt->bssid, ETH_ALEN) != 0)
1318 return RX_MGMT_NONE;
1319
1320 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
1321 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
1322 status_code = le16_to_cpu(mgmt->u.auth.status_code);
1323
1324 if (auth_alg != wk->auth_alg ||
1325 auth_transaction != wk->auth_transaction)
1326 return RX_MGMT_NONE;
1327
1328 if (status_code != WLAN_STATUS_SUCCESS) {
1329 list_del(&wk->list);
1330 kfree(wk);
1331 return RX_MGMT_CFG80211_AUTH;
1332 }
1333
1334 switch (wk->auth_alg) {
1335 case WLAN_AUTH_OPEN:
1336 case WLAN_AUTH_LEAP:
1337 case WLAN_AUTH_FT:
1338 ieee80211_auth_completed(sdata, wk);
1339 return RX_MGMT_CFG80211_AUTH;
1340 case WLAN_AUTH_SHARED_KEY:
1341 if (wk->auth_transaction == 4) {
1342 ieee80211_auth_completed(sdata, wk);
1343 return RX_MGMT_CFG80211_AUTH;
1344 } else
1345 ieee80211_auth_challenge(sdata, wk, mgmt, len);
1346 break;
1347 }
1348
1349 return RX_MGMT_NONE;
1350}
1351
1352
1353static enum rx_mgmt_action __must_check 947static enum rx_mgmt_action __must_check
1354ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, 948ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1355 struct ieee80211_mgd_work *wk,
1356 struct ieee80211_mgmt *mgmt, size_t len) 949 struct ieee80211_mgmt *mgmt, size_t len)
1357{ 950{
1358 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 951 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -1364,23 +957,15 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1364 957
1365 ASSERT_MGD_MTX(ifmgd); 958 ASSERT_MGD_MTX(ifmgd);
1366 959
1367 if (wk) 960 bssid = ifmgd->associated->bssid;
1368 bssid = wk->bss->cbss.bssid;
1369 else
1370 bssid = ifmgd->associated->cbss.bssid;
1371 961
1372 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); 962 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
1373 963
1374 printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n", 964 printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n",
1375 sdata->dev->name, bssid, reason_code); 965 sdata->name, bssid, reason_code);
1376 966
1377 if (!wk) { 967 ieee80211_set_disassoc(sdata);
1378 ieee80211_set_disassoc(sdata, true); 968 ieee80211_recalc_idle(sdata->local);
1379 ieee80211_recalc_idle(sdata->local);
1380 } else {
1381 list_del(&wk->list);
1382 kfree(wk);
1383 }
1384 969
1385 return RX_MGMT_CFG80211_DEAUTH; 970 return RX_MGMT_CFG80211_DEAUTH;
1386} 971}
@@ -1401,123 +986,72 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1401 if (WARN_ON(!ifmgd->associated)) 986 if (WARN_ON(!ifmgd->associated))
1402 return RX_MGMT_NONE; 987 return RX_MGMT_NONE;
1403 988
1404 if (WARN_ON(memcmp(ifmgd->associated->cbss.bssid, mgmt->sa, ETH_ALEN))) 989 if (WARN_ON(memcmp(ifmgd->associated->bssid, mgmt->sa, ETH_ALEN)))
1405 return RX_MGMT_NONE; 990 return RX_MGMT_NONE;
1406 991
1407 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); 992 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
1408 993
1409 printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n", 994 printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n",
1410 sdata->dev->name, mgmt->sa, reason_code); 995 sdata->name, mgmt->sa, reason_code);
1411 996
1412 ieee80211_set_disassoc(sdata, false); 997 ieee80211_set_disassoc(sdata);
1413 ieee80211_recalc_idle(sdata->local); 998 ieee80211_recalc_idle(sdata->local);
1414 return RX_MGMT_CFG80211_DISASSOC; 999 return RX_MGMT_CFG80211_DISASSOC;
1415} 1000}
1416 1001
1417 1002
1418static enum rx_mgmt_action __must_check 1003static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1419ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, 1004 struct ieee80211_mgmt *mgmt, size_t len)
1420 struct ieee80211_mgd_work *wk,
1421 struct ieee80211_mgmt *mgmt, size_t len,
1422 bool reassoc)
1423{ 1005{
1006 struct ieee80211_sub_if_data *sdata = wk->sdata;
1424 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1007 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1425 struct ieee80211_local *local = sdata->local; 1008 struct ieee80211_local *local = sdata->local;
1426 struct ieee80211_supported_band *sband; 1009 struct ieee80211_supported_band *sband;
1427 struct sta_info *sta; 1010 struct sta_info *sta;
1011 struct cfg80211_bss *cbss = wk->assoc.bss;
1012 u8 *pos;
1428 u32 rates, basic_rates; 1013 u32 rates, basic_rates;
1429 u16 capab_info, status_code, aid; 1014 u16 capab_info, aid;
1430 struct ieee802_11_elems elems; 1015 struct ieee802_11_elems elems;
1431 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; 1016 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
1432 u8 *pos;
1433 u32 changed = 0; 1017 u32 changed = 0;
1434 int i, j; 1018 int i, j, err;
1435 bool have_higher_than_11mbit = false, newsta = false; 1019 bool have_higher_than_11mbit = false;
1436 u16 ap_ht_cap_flags; 1020 u16 ap_ht_cap_flags;
1437 1021
1438 /* 1022 /* AssocResp and ReassocResp have identical structure */
1439 * AssocResp and ReassocResp have identical structure, so process both
1440 * of them in this function.
1441 */
1442
1443 if (len < 24 + 6)
1444 return RX_MGMT_NONE;
1445
1446 if (memcmp(wk->bss->cbss.bssid, mgmt->sa, ETH_ALEN) != 0)
1447 return RX_MGMT_NONE;
1448 1023
1449 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
1450 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
1451 aid = le16_to_cpu(mgmt->u.assoc_resp.aid); 1024 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
1452 1025 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
1453 printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
1454 "status=%d aid=%d)\n",
1455 sdata->dev->name, reassoc ? "Rea" : "A", mgmt->sa,
1456 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
1457
1458 pos = mgmt->u.assoc_resp.variable;
1459 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1460
1461 if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
1462 elems.timeout_int && elems.timeout_int_len == 5 &&
1463 elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) {
1464 u32 tu, ms;
1465 tu = get_unaligned_le32(elems.timeout_int + 1);
1466 ms = tu * 1024 / 1000;
1467 printk(KERN_DEBUG "%s: AP rejected association temporarily; "
1468 "comeback duration %u TU (%u ms)\n",
1469 sdata->dev->name, tu, ms);
1470 wk->timeout = jiffies + msecs_to_jiffies(ms);
1471 if (ms > IEEE80211_ASSOC_TIMEOUT)
1472 run_again(ifmgd, jiffies + msecs_to_jiffies(ms));
1473 return RX_MGMT_NONE;
1474 }
1475
1476 if (status_code != WLAN_STATUS_SUCCESS) {
1477 printk(KERN_DEBUG "%s: AP denied association (code=%d)\n",
1478 sdata->dev->name, status_code);
1479 wk->state = IEEE80211_MGD_STATE_IDLE;
1480 return RX_MGMT_CFG80211_ASSOC;
1481 }
1482 1026
1483 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) 1027 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
1484 printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not " 1028 printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not "
1485 "set\n", sdata->dev->name, aid); 1029 "set\n", sdata->name, aid);
1486 aid &= ~(BIT(15) | BIT(14)); 1030 aid &= ~(BIT(15) | BIT(14));
1487 1031
1032 pos = mgmt->u.assoc_resp.variable;
1033 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1034
1488 if (!elems.supp_rates) { 1035 if (!elems.supp_rates) {
1489 printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n", 1036 printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n",
1490 sdata->dev->name); 1037 sdata->name);
1491 return RX_MGMT_NONE; 1038 return false;
1492 } 1039 }
1493 1040
1494 printk(KERN_DEBUG "%s: associated\n", sdata->dev->name);
1495 ifmgd->aid = aid; 1041 ifmgd->aid = aid;
1496 1042
1497 rcu_read_lock(); 1043 sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
1498
1499 /* Add STA entry for the AP */
1500 sta = sta_info_get(local, wk->bss->cbss.bssid);
1501 if (!sta) { 1044 if (!sta) {
1502 newsta = true; 1045 printk(KERN_DEBUG "%s: failed to alloc STA entry for"
1503 1046 " the AP\n", sdata->name);
1504 rcu_read_unlock(); 1047 return false;
1505
1506 sta = sta_info_alloc(sdata, wk->bss->cbss.bssid, GFP_KERNEL);
1507 if (!sta) {
1508 printk(KERN_DEBUG "%s: failed to alloc STA entry for"
1509 " the AP\n", sdata->dev->name);
1510 return RX_MGMT_NONE;
1511 }
1512
1513 set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC |
1514 WLAN_STA_ASSOC_AP);
1515 if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
1516 set_sta_flags(sta, WLAN_STA_AUTHORIZED);
1517
1518 rcu_read_lock();
1519 } 1048 }
1520 1049
1050 set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC |
1051 WLAN_STA_ASSOC_AP);
1052 if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
1053 set_sta_flags(sta, WLAN_STA_AUTHORIZED);
1054
1521 rates = 0; 1055 rates = 0;
1522 basic_rates = 0; 1056 basic_rates = 0;
1523 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1057 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
@@ -1580,40 +1114,40 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1580 if (elems.wmm_param) 1114 if (elems.wmm_param)
1581 set_sta_flags(sta, WLAN_STA_WME); 1115 set_sta_flags(sta, WLAN_STA_WME);
1582 1116
1583 if (newsta) { 1117 err = sta_info_insert(sta);
1584 int err = sta_info_insert(sta); 1118 sta = NULL;
1585 if (err) { 1119 if (err) {
1586 printk(KERN_DEBUG "%s: failed to insert STA entry for" 1120 printk(KERN_DEBUG "%s: failed to insert STA entry for"
1587 " the AP (error %d)\n", sdata->dev->name, err); 1121 " the AP (error %d)\n", sdata->name, err);
1588 rcu_read_unlock(); 1122 return false;
1589 return RX_MGMT_NONE;
1590 }
1591 } 1123 }
1592 1124
1593 rcu_read_unlock();
1594
1595 if (elems.wmm_param) 1125 if (elems.wmm_param)
1596 ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param, 1126 ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param,
1597 elems.wmm_param_len); 1127 elems.wmm_param_len);
1598 else 1128 else
1599 ieee80211_set_wmm_default(sdata); 1129 ieee80211_set_wmm_default(sdata);
1600 1130
1131 local->oper_channel = wk->chan;
1132
1601 if (elems.ht_info_elem && elems.wmm_param && 1133 if (elems.ht_info_elem && elems.wmm_param &&
1602 (ifmgd->flags & IEEE80211_STA_WMM_ENABLED) && 1134 (sdata->local->hw.queues >= 4) &&
1603 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) 1135 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
1604 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, 1136 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
1605 wk->bss->cbss.bssid, 1137 cbss->bssid, ap_ht_cap_flags);
1606 ap_ht_cap_flags);
1607
1608 /* delete work item -- must be before set_associated for PS */
1609 list_del(&wk->list);
1610 1138
1611 /* set AID and assoc capability, 1139 /* set AID and assoc capability,
1612 * ieee80211_set_associated() will tell the driver */ 1140 * ieee80211_set_associated() will tell the driver */
1613 bss_conf->aid = aid; 1141 bss_conf->aid = aid;
1614 bss_conf->assoc_capability = capab_info; 1142 bss_conf->assoc_capability = capab_info;
1615 /* this will take ownership of wk */ 1143 ieee80211_set_associated(sdata, cbss, changed);
1616 ieee80211_set_associated(sdata, wk, changed); 1144
1145 /*
1146 * If we're using 4-addr mode, let the AP know that we're
1147 * doing so, so that it can create the STA VLAN on its side
1148 */
1149 if (ifmgd->use_4addr)
1150 ieee80211_send_4addr_nullfunc(local, sdata);
1617 1151
1618 /* 1152 /*
1619 * Start timer to probe the connection to the AP now. 1153 * Start timer to probe the connection to the AP now.
@@ -1622,7 +1156,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1622 ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt); 1156 ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt);
1623 mod_beacon_timer(sdata); 1157 mod_beacon_timer(sdata);
1624 1158
1625 return RX_MGMT_CFG80211_ASSOC; 1159 return true;
1626} 1160}
1627 1161
1628 1162
@@ -1637,6 +1171,13 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1637 int freq; 1171 int freq;
1638 struct ieee80211_bss *bss; 1172 struct ieee80211_bss *bss;
1639 struct ieee80211_channel *channel; 1173 struct ieee80211_channel *channel;
1174 bool need_ps = false;
1175
1176 if (sdata->u.mgd.associated) {
1177 bss = (void *)sdata->u.mgd.associated->priv;
1178 /* not previously set so we may need to recalc */
1179 need_ps = !bss->dtim_period;
1180 }
1640 1181
1641 if (elems->ds_params && elems->ds_params_len == 1) 1182 if (elems->ds_params && elems->ds_params_len == 1)
1642 freq = ieee80211_channel_to_frequency(elems->ds_params[0]); 1183 freq = ieee80211_channel_to_frequency(elems->ds_params[0]);
@@ -1656,8 +1197,14 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1656 if (!sdata->u.mgd.associated) 1197 if (!sdata->u.mgd.associated)
1657 return; 1198 return;
1658 1199
1200 if (need_ps) {
1201 mutex_lock(&local->iflist_mtx);
1202 ieee80211_recalc_ps(local, -1);
1203 mutex_unlock(&local->iflist_mtx);
1204 }
1205
1659 if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) && 1206 if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) &&
1660 (memcmp(mgmt->bssid, sdata->u.mgd.associated->cbss.bssid, 1207 (memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid,
1661 ETH_ALEN) == 0)) { 1208 ETH_ALEN) == 0)) {
1662 struct ieee80211_channel_sw_ie *sw_elem = 1209 struct ieee80211_channel_sw_ie *sw_elem =
1663 (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem; 1210 (struct ieee80211_channel_sw_ie *)elems->ch_switch_elem;
@@ -1667,19 +1214,19 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1667 1214
1668 1215
1669static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, 1216static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1670 struct ieee80211_mgd_work *wk, 1217 struct sk_buff *skb)
1671 struct ieee80211_mgmt *mgmt, size_t len,
1672 struct ieee80211_rx_status *rx_status)
1673{ 1218{
1219 struct ieee80211_mgmt *mgmt = (void *)skb->data;
1674 struct ieee80211_if_managed *ifmgd; 1220 struct ieee80211_if_managed *ifmgd;
1675 size_t baselen; 1221 struct ieee80211_rx_status *rx_status = (void *) skb->cb;
1222 size_t baselen, len = skb->len;
1676 struct ieee802_11_elems elems; 1223 struct ieee802_11_elems elems;
1677 1224
1678 ifmgd = &sdata->u.mgd; 1225 ifmgd = &sdata->u.mgd;
1679 1226
1680 ASSERT_MGD_MTX(ifmgd); 1227 ASSERT_MGD_MTX(ifmgd);
1681 1228
1682 if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) 1229 if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
1683 return; /* ignore ProbeResp to foreign address */ 1230 return; /* ignore ProbeResp to foreign address */
1684 1231
1685 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; 1232 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -1691,17 +1238,8 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1691 1238
1692 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false); 1239 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false);
1693 1240
1694 /* direct probe may be part of the association flow */
1695 if (wk && wk->state == IEEE80211_MGD_STATE_PROBE) {
1696 printk(KERN_DEBUG "%s: direct probe responded\n",
1697 sdata->dev->name);
1698 wk->tries = 0;
1699 wk->state = IEEE80211_MGD_STATE_AUTH;
1700 WARN_ON(ieee80211_authenticate(sdata, wk) != RX_MGMT_NONE);
1701 }
1702
1703 if (ifmgd->associated && 1241 if (ifmgd->associated &&
1704 memcmp(mgmt->bssid, ifmgd->associated->cbss.bssid, ETH_ALEN) == 0 && 1242 memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN) == 0 &&
1705 ifmgd->flags & (IEEE80211_STA_BEACON_POLL | 1243 ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
1706 IEEE80211_STA_CONNECTION_POLL)) { 1244 IEEE80211_STA_CONNECTION_POLL)) {
1707 ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | 1245 ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
@@ -1774,7 +1312,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1774 if (!ifmgd->associated) 1312 if (!ifmgd->associated)
1775 return; 1313 return;
1776 1314
1777 bssid = ifmgd->associated->cbss.bssid; 1315 bssid = ifmgd->associated->bssid;
1778 1316
1779 /* 1317 /*
1780 * And in theory even frames from a different AP we were just 1318 * And in theory even frames from a different AP we were just
@@ -1787,7 +1325,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1787#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1325#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1788 if (net_ratelimit()) { 1326 if (net_ratelimit()) {
1789 printk(KERN_DEBUG "%s: cancelling probereq poll due " 1327 printk(KERN_DEBUG "%s: cancelling probereq poll due "
1790 "to a received beacon\n", sdata->dev->name); 1328 "to a received beacon\n", sdata->name);
1791 } 1329 }
1792#endif 1330#endif
1793 ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL; 1331 ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
@@ -1865,7 +1403,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1865 1403
1866 rcu_read_lock(); 1404 rcu_read_lock();
1867 1405
1868 sta = sta_info_get(local, bssid); 1406 sta = sta_info_get(sdata, bssid);
1869 if (WARN_ON(!sta)) { 1407 if (WARN_ON(!sta)) {
1870 rcu_read_unlock(); 1408 rcu_read_unlock();
1871 return; 1409 return;
@@ -1913,9 +1451,6 @@ ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
1913 switch (fc & IEEE80211_FCTL_STYPE) { 1451 switch (fc & IEEE80211_FCTL_STYPE) {
1914 case IEEE80211_STYPE_PROBE_RESP: 1452 case IEEE80211_STYPE_PROBE_RESP:
1915 case IEEE80211_STYPE_BEACON: 1453 case IEEE80211_STYPE_BEACON:
1916 case IEEE80211_STYPE_AUTH:
1917 case IEEE80211_STYPE_ASSOC_RESP:
1918 case IEEE80211_STYPE_REASSOC_RESP:
1919 case IEEE80211_STYPE_DEAUTH: 1454 case IEEE80211_STYPE_DEAUTH:
1920 case IEEE80211_STYPE_DISASSOC: 1455 case IEEE80211_STYPE_DISASSOC:
1921 case IEEE80211_STYPE_ACTION: 1456 case IEEE80211_STYPE_ACTION:
@@ -1933,7 +1468,6 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1933 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1468 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1934 struct ieee80211_rx_status *rx_status; 1469 struct ieee80211_rx_status *rx_status;
1935 struct ieee80211_mgmt *mgmt; 1470 struct ieee80211_mgmt *mgmt;
1936 struct ieee80211_mgd_work *wk;
1937 enum rx_mgmt_action rma = RX_MGMT_NONE; 1471 enum rx_mgmt_action rma = RX_MGMT_NONE;
1938 u16 fc; 1472 u16 fc;
1939 1473
@@ -1944,20 +1478,17 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1944 mutex_lock(&ifmgd->mtx); 1478 mutex_lock(&ifmgd->mtx);
1945 1479
1946 if (ifmgd->associated && 1480 if (ifmgd->associated &&
1947 memcmp(ifmgd->associated->cbss.bssid, mgmt->bssid, 1481 memcmp(ifmgd->associated->bssid, mgmt->bssid, ETH_ALEN) == 0) {
1948 ETH_ALEN) == 0) {
1949 switch (fc & IEEE80211_FCTL_STYPE) { 1482 switch (fc & IEEE80211_FCTL_STYPE) {
1950 case IEEE80211_STYPE_BEACON: 1483 case IEEE80211_STYPE_BEACON:
1951 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, 1484 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len,
1952 rx_status); 1485 rx_status);
1953 break; 1486 break;
1954 case IEEE80211_STYPE_PROBE_RESP: 1487 case IEEE80211_STYPE_PROBE_RESP:
1955 ieee80211_rx_mgmt_probe_resp(sdata, NULL, mgmt, 1488 ieee80211_rx_mgmt_probe_resp(sdata, skb);
1956 skb->len, rx_status);
1957 break; 1489 break;
1958 case IEEE80211_STYPE_DEAUTH: 1490 case IEEE80211_STYPE_DEAUTH:
1959 rma = ieee80211_rx_mgmt_deauth(sdata, NULL, 1491 rma = ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len);
1960 mgmt, skb->len);
1961 break; 1492 break;
1962 case IEEE80211_STYPE_DISASSOC: 1493 case IEEE80211_STYPE_DISASSOC:
1963 rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); 1494 rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
@@ -1968,7 +1499,7 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1968 1499
1969 ieee80211_sta_process_chanswitch(sdata, 1500 ieee80211_sta_process_chanswitch(sdata,
1970 &mgmt->u.action.u.chan_switch.sw_elem, 1501 &mgmt->u.action.u.chan_switch.sw_elem,
1971 ifmgd->associated); 1502 (void *)ifmgd->associated->priv);
1972 break; 1503 break;
1973 } 1504 }
1974 mutex_unlock(&ifmgd->mtx); 1505 mutex_unlock(&ifmgd->mtx);
@@ -1989,58 +1520,11 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1989 goto out; 1520 goto out;
1990 } 1521 }
1991 1522
1992 list_for_each_entry(wk, &ifmgd->work_list, list) {
1993 if (memcmp(wk->bss->cbss.bssid, mgmt->bssid, ETH_ALEN) != 0)
1994 continue;
1995
1996 switch (fc & IEEE80211_FCTL_STYPE) {
1997 case IEEE80211_STYPE_PROBE_RESP:
1998 ieee80211_rx_mgmt_probe_resp(sdata, wk, mgmt, skb->len,
1999 rx_status);
2000 break;
2001 case IEEE80211_STYPE_AUTH:
2002 rma = ieee80211_rx_mgmt_auth(sdata, wk, mgmt, skb->len);
2003 break;
2004 case IEEE80211_STYPE_ASSOC_RESP:
2005 rma = ieee80211_rx_mgmt_assoc_resp(sdata, wk, mgmt,
2006 skb->len, false);
2007 break;
2008 case IEEE80211_STYPE_REASSOC_RESP:
2009 rma = ieee80211_rx_mgmt_assoc_resp(sdata, wk, mgmt,
2010 skb->len, true);
2011 break;
2012 case IEEE80211_STYPE_DEAUTH:
2013 rma = ieee80211_rx_mgmt_deauth(sdata, wk, mgmt,
2014 skb->len);
2015 break;
2016 }
2017 /*
2018 * We've processed this frame for that work, so it can't
2019 * belong to another work struct.
2020 * NB: this is also required for correctness because the
2021 * called functions can free 'wk', and for 'rma'!
2022 */
2023 break;
2024 }
2025
2026 mutex_unlock(&ifmgd->mtx); 1523 mutex_unlock(&ifmgd->mtx);
2027 1524
2028 switch (rma) { 1525 if (skb->len >= 24 + 2 /* mgmt + deauth reason */ &&
2029 case RX_MGMT_NONE: 1526 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH)
2030 /* no action */
2031 break;
2032 case RX_MGMT_CFG80211_AUTH:
2033 cfg80211_send_rx_auth(sdata->dev, (u8 *) mgmt, skb->len);
2034 break;
2035 case RX_MGMT_CFG80211_ASSOC:
2036 cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, skb->len);
2037 break;
2038 case RX_MGMT_CFG80211_DEAUTH:
2039 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); 1527 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
2040 break;
2041 default:
2042 WARN(1, "unexpected: %d", rma);
2043 }
2044 1528
2045 out: 1529 out:
2046 kfree_skb(skb); 1530 kfree_skb(skb);
@@ -2068,12 +1552,8 @@ static void ieee80211_sta_work(struct work_struct *work)
2068 struct ieee80211_local *local = sdata->local; 1552 struct ieee80211_local *local = sdata->local;
2069 struct ieee80211_if_managed *ifmgd; 1553 struct ieee80211_if_managed *ifmgd;
2070 struct sk_buff *skb; 1554 struct sk_buff *skb;
2071 struct ieee80211_mgd_work *wk, *tmp;
2072 LIST_HEAD(free_work);
2073 enum rx_mgmt_action rma;
2074 bool anybusy = false;
2075 1555
2076 if (!netif_running(sdata->dev)) 1556 if (!ieee80211_sdata_running(sdata))
2077 return; 1557 return;
2078 1558
2079 if (local->scanning) 1559 if (local->scanning)
@@ -2104,7 +1584,7 @@ static void ieee80211_sta_work(struct work_struct *work)
2104 ifmgd->associated) { 1584 ifmgd->associated) {
2105 u8 bssid[ETH_ALEN]; 1585 u8 bssid[ETH_ALEN];
2106 1586
2107 memcpy(bssid, ifmgd->associated->cbss.bssid, ETH_ALEN); 1587 memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
2108 if (time_is_after_jiffies(ifmgd->probe_timeout)) 1588 if (time_is_after_jiffies(ifmgd->probe_timeout))
2109 run_again(ifmgd, ifmgd->probe_timeout); 1589 run_again(ifmgd, ifmgd->probe_timeout);
2110 1590
@@ -2126,7 +1606,7 @@ static void ieee80211_sta_work(struct work_struct *work)
2126 printk(KERN_DEBUG "No probe response from AP %pM" 1606 printk(KERN_DEBUG "No probe response from AP %pM"
2127 " after %dms, disconnecting.\n", 1607 " after %dms, disconnecting.\n",
2128 bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ); 1608 bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
2129 ieee80211_set_disassoc(sdata, true); 1609 ieee80211_set_disassoc(sdata);
2130 ieee80211_recalc_idle(local); 1610 ieee80211_recalc_idle(local);
2131 mutex_unlock(&ifmgd->mtx); 1611 mutex_unlock(&ifmgd->mtx);
2132 /* 1612 /*
@@ -2141,87 +1621,7 @@ static void ieee80211_sta_work(struct work_struct *work)
2141 } 1621 }
2142 } 1622 }
2143 1623
2144
2145 ieee80211_recalc_idle(local);
2146
2147 list_for_each_entry_safe(wk, tmp, &ifmgd->work_list, list) {
2148 if (time_is_after_jiffies(wk->timeout)) {
2149 /*
2150 * This work item isn't supposed to be worked on
2151 * right now, but take care to adjust the timer
2152 * properly.
2153 */
2154 run_again(ifmgd, wk->timeout);
2155 continue;
2156 }
2157
2158 switch (wk->state) {
2159 default:
2160 WARN_ON(1);
2161 /* fall through */
2162 case IEEE80211_MGD_STATE_IDLE:
2163 /* nothing */
2164 rma = RX_MGMT_NONE;
2165 break;
2166 case IEEE80211_MGD_STATE_PROBE:
2167 rma = ieee80211_direct_probe(sdata, wk);
2168 break;
2169 case IEEE80211_MGD_STATE_AUTH:
2170 rma = ieee80211_authenticate(sdata, wk);
2171 break;
2172 case IEEE80211_MGD_STATE_ASSOC:
2173 rma = ieee80211_associate(sdata, wk);
2174 break;
2175 }
2176
2177 switch (rma) {
2178 case RX_MGMT_NONE:
2179 /* no action required */
2180 break;
2181 case RX_MGMT_CFG80211_AUTH_TO:
2182 case RX_MGMT_CFG80211_ASSOC_TO:
2183 list_del(&wk->list);
2184 list_add(&wk->list, &free_work);
2185 wk->tries = rma; /* small abuse but only local */
2186 break;
2187 default:
2188 WARN(1, "unexpected: %d", rma);
2189 }
2190 }
2191
2192 list_for_each_entry(wk, &ifmgd->work_list, list) {
2193 if (wk->state != IEEE80211_MGD_STATE_IDLE) {
2194 anybusy = true;
2195 break;
2196 }
2197 }
2198 if (!anybusy &&
2199 test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request))
2200 ieee80211_queue_delayed_work(&local->hw,
2201 &local->scan_work,
2202 round_jiffies_relative(0));
2203
2204 mutex_unlock(&ifmgd->mtx); 1624 mutex_unlock(&ifmgd->mtx);
2205
2206 list_for_each_entry_safe(wk, tmp, &free_work, list) {
2207 switch (wk->tries) {
2208 case RX_MGMT_CFG80211_AUTH_TO:
2209 cfg80211_send_auth_timeout(sdata->dev,
2210 wk->bss->cbss.bssid);
2211 break;
2212 case RX_MGMT_CFG80211_ASSOC_TO:
2213 cfg80211_send_assoc_timeout(sdata->dev,
2214 wk->bss->cbss.bssid);
2215 break;
2216 default:
2217 WARN(1, "unexpected: %d", wk->tries);
2218 }
2219
2220 list_del(&wk->list);
2221 kfree(wk);
2222 }
2223
2224 ieee80211_recalc_idle(local);
2225} 1625}
2226 1626
2227static void ieee80211_sta_bcn_mon_timer(unsigned long data) 1627static void ieee80211_sta_bcn_mon_timer(unsigned long data)
@@ -2330,14 +1730,14 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
2330 (unsigned long) sdata); 1730 (unsigned long) sdata);
2331 skb_queue_head_init(&ifmgd->skb_queue); 1731 skb_queue_head_init(&ifmgd->skb_queue);
2332 1732
2333 INIT_LIST_HEAD(&ifmgd->work_list);
2334
2335 ifmgd->capab = WLAN_CAPABILITY_ESS;
2336 ifmgd->flags = 0; 1733 ifmgd->flags = 0;
2337 if (sdata->local->hw.queues >= 4)
2338 ifmgd->flags |= IEEE80211_STA_WMM_ENABLED;
2339 1734
2340 mutex_init(&ifmgd->mtx); 1735 mutex_init(&ifmgd->mtx);
1736
1737 if (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
1738 ifmgd->req_smps = IEEE80211_SMPS_AUTOMATIC;
1739 else
1740 ifmgd->req_smps = IEEE80211_SMPS_OFF;
2341} 1741}
2342 1742
2343/* scan finished notification */ 1743/* scan finished notification */
@@ -2368,12 +1768,34 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
2368} 1768}
2369 1769
2370/* config hooks */ 1770/* config hooks */
1771static enum work_done_result
1772ieee80211_probe_auth_done(struct ieee80211_work *wk,
1773 struct sk_buff *skb)
1774{
1775 if (!skb) {
1776 cfg80211_send_auth_timeout(wk->sdata->dev, wk->filter_ta);
1777 return WORK_DONE_DESTROY;
1778 }
1779
1780 if (wk->type == IEEE80211_WORK_AUTH) {
1781 cfg80211_send_rx_auth(wk->sdata->dev, skb->data, skb->len);
1782 return WORK_DONE_DESTROY;
1783 }
1784
1785 mutex_lock(&wk->sdata->u.mgd.mtx);
1786 ieee80211_rx_mgmt_probe_resp(wk->sdata, skb);
1787 mutex_unlock(&wk->sdata->u.mgd.mtx);
1788
1789 wk->type = IEEE80211_WORK_AUTH;
1790 wk->probe_auth.tries = 0;
1791 return WORK_DONE_REQUEUE;
1792}
1793
2371int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, 1794int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
2372 struct cfg80211_auth_request *req) 1795 struct cfg80211_auth_request *req)
2373{ 1796{
2374 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2375 const u8 *ssid; 1797 const u8 *ssid;
2376 struct ieee80211_mgd_work *wk; 1798 struct ieee80211_work *wk;
2377 u16 auth_alg; 1799 u16 auth_alg;
2378 1800
2379 switch (req->auth_type) { 1801 switch (req->auth_type) {
@@ -2397,7 +1819,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
2397 if (!wk) 1819 if (!wk)
2398 return -ENOMEM; 1820 return -ENOMEM;
2399 1821
2400 wk->bss = (void *)req->bss; 1822 memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN);
2401 1823
2402 if (req->ie && req->ie_len) { 1824 if (req->ie && req->ie_len) {
2403 memcpy(wk->ie, req->ie, req->ie_len); 1825 memcpy(wk->ie, req->ie, req->ie_len);
@@ -2405,68 +1827,83 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
2405 } 1827 }
2406 1828
2407 if (req->key && req->key_len) { 1829 if (req->key && req->key_len) {
2408 wk->key_len = req->key_len; 1830 wk->probe_auth.key_len = req->key_len;
2409 wk->key_idx = req->key_idx; 1831 wk->probe_auth.key_idx = req->key_idx;
2410 memcpy(wk->key, req->key, req->key_len); 1832 memcpy(wk->probe_auth.key, req->key, req->key_len);
2411 } 1833 }
2412 1834
2413 ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID); 1835 ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
2414 memcpy(wk->ssid, ssid + 2, ssid[1]); 1836 memcpy(wk->probe_auth.ssid, ssid + 2, ssid[1]);
2415 wk->ssid_len = ssid[1]; 1837 wk->probe_auth.ssid_len = ssid[1];
2416
2417 wk->state = IEEE80211_MGD_STATE_PROBE;
2418 wk->auth_alg = auth_alg;
2419 wk->timeout = jiffies; /* run right away */
2420 1838
2421 /* 1839 wk->probe_auth.algorithm = auth_alg;
2422 * XXX: if still associated need to tell AP that we're going 1840 wk->probe_auth.privacy = req->bss->capability & WLAN_CAPABILITY_PRIVACY;
2423 * to sleep and then change channel etc.
2424 */
2425 sdata->local->oper_channel = req->bss->channel;
2426 ieee80211_hw_config(sdata->local, 0);
2427 1841
2428 mutex_lock(&ifmgd->mtx); 1842 /* if we already have a probe, don't probe again */
2429 list_add(&wk->list, &sdata->u.mgd.work_list); 1843 if (req->bss->proberesp_ies)
2430 mutex_unlock(&ifmgd->mtx); 1844 wk->type = IEEE80211_WORK_AUTH;
1845 else
1846 wk->type = IEEE80211_WORK_DIRECT_PROBE;
1847 wk->chan = req->bss->channel;
1848 wk->sdata = sdata;
1849 wk->done = ieee80211_probe_auth_done;
2431 1850
2432 ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.work); 1851 ieee80211_add_work(wk);
2433 return 0; 1852 return 0;
2434} 1853}
2435 1854
2436int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, 1855static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
2437 struct cfg80211_assoc_request *req) 1856 struct sk_buff *skb)
2438{ 1857{
2439 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1858 struct ieee80211_mgmt *mgmt;
2440 struct ieee80211_mgd_work *wk, *found = NULL; 1859 u16 status;
2441 int i, err;
2442 1860
2443 mutex_lock(&ifmgd->mtx); 1861 if (!skb) {
1862 cfg80211_send_assoc_timeout(wk->sdata->dev, wk->filter_ta);
1863 return WORK_DONE_DESTROY;
1864 }
2444 1865
2445 list_for_each_entry(wk, &ifmgd->work_list, list) { 1866 mgmt = (void *)skb->data;
2446 if (&wk->bss->cbss == req->bss && 1867 status = le16_to_cpu(mgmt->u.assoc_resp.status_code);
2447 wk->state == IEEE80211_MGD_STATE_IDLE) { 1868
2448 found = wk; 1869 if (status == WLAN_STATUS_SUCCESS) {
2449 break; 1870 mutex_lock(&wk->sdata->u.mgd.mtx);
1871 if (!ieee80211_assoc_success(wk, mgmt, skb->len)) {
1872 mutex_unlock(&wk->sdata->u.mgd.mtx);
1873 /* oops -- internal error -- send timeout for now */
1874 cfg80211_send_assoc_timeout(wk->sdata->dev,
1875 wk->filter_ta);
1876 return WORK_DONE_DESTROY;
2450 } 1877 }
1878 mutex_unlock(&wk->sdata->u.mgd.mtx);
2451 } 1879 }
2452 1880
2453 if (!found) { 1881 cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len);
2454 err = -ENOLINK; 1882 return WORK_DONE_DESTROY;
2455 goto out; 1883}
2456 }
2457 1884
2458 list_del(&found->list); 1885int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
1886 struct cfg80211_assoc_request *req)
1887{
1888 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1889 struct ieee80211_bss *bss = (void *)req->bss->priv;
1890 struct ieee80211_work *wk;
1891 const u8 *ssid;
1892 int i;
2459 1893
2460 wk = krealloc(found, sizeof(*wk) + req->ie_len, GFP_KERNEL); 1894 mutex_lock(&ifmgd->mtx);
2461 if (!wk) { 1895 if (ifmgd->associated) {
2462 list_add(&found->list, &ifmgd->work_list); 1896 mutex_unlock(&ifmgd->mtx);
2463 err = -ENOMEM; 1897 return -EALREADY;
2464 goto out;
2465 } 1898 }
1899 mutex_unlock(&ifmgd->mtx);
2466 1900
2467 list_add(&wk->list, &ifmgd->work_list); 1901 wk = kzalloc(sizeof(*wk) + req->ie_len, GFP_KERNEL);
1902 if (!wk)
1903 return -ENOMEM;
2468 1904
2469 ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N; 1905 ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
1906 ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
2470 1907
2471 for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) 1908 for (i = 0; i < req->crypto.n_ciphers_pairwise; i++)
2472 if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 || 1909 if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
@@ -2474,8 +1911,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2474 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) 1911 req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104)
2475 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 1912 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
2476 1913
2477 sdata->local->oper_channel = req->bss->channel;
2478 ieee80211_hw_config(sdata->local, 0);
2479 1914
2480 if (req->ie && req->ie_len) { 1915 if (req->ie && req->ie_len) {
2481 memcpy(wk->ie, req->ie, req->ie_len); 1916 memcpy(wk->ie, req->ie, req->ie_len);
@@ -2483,12 +1918,55 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2483 } else 1918 } else
2484 wk->ie_len = 0; 1919 wk->ie_len = 0;
2485 1920
1921 wk->assoc.bss = req->bss;
1922
1923 memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN);
1924
1925 /* new association always uses requested smps mode */
1926 if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) {
1927 if (ifmgd->powersave)
1928 ifmgd->ap_smps = IEEE80211_SMPS_DYNAMIC;
1929 else
1930 ifmgd->ap_smps = IEEE80211_SMPS_OFF;
1931 } else
1932 ifmgd->ap_smps = ifmgd->req_smps;
1933
1934 wk->assoc.smps = ifmgd->ap_smps;
1935 /*
1936 * IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode.
1937 * We still associate in non-HT mode (11a/b/g) if any one of these
1938 * ciphers is configured as pairwise.
1939 * We can set this to true for non-11n hardware, that'll be checked
1940 * separately along with the peer capabilities.
1941 */
1942 wk->assoc.use_11n = !(ifmgd->flags & IEEE80211_STA_DISABLE_11N);
1943 wk->assoc.capability = req->bss->capability;
1944 wk->assoc.wmm_used = bss->wmm_used;
1945 wk->assoc.supp_rates = bss->supp_rates;
1946 wk->assoc.supp_rates_len = bss->supp_rates_len;
1947 wk->assoc.ht_information_ie =
1948 ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_INFORMATION);
1949
1950 if (bss->wmm_used && bss->uapsd_supported &&
1951 (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
1952 wk->assoc.uapsd_used = true;
1953 ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
1954 } else {
1955 wk->assoc.uapsd_used = false;
1956 ifmgd->flags &= ~IEEE80211_STA_UAPSD_ENABLED;
1957 }
1958
1959 ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
1960 memcpy(wk->assoc.ssid, ssid + 2, ssid[1]);
1961 wk->assoc.ssid_len = ssid[1];
1962
2486 if (req->prev_bssid) 1963 if (req->prev_bssid)
2487 memcpy(wk->prev_bssid, req->prev_bssid, ETH_ALEN); 1964 memcpy(wk->assoc.prev_bssid, req->prev_bssid, ETH_ALEN);
2488 1965
2489 wk->state = IEEE80211_MGD_STATE_ASSOC; 1966 wk->type = IEEE80211_WORK_ASSOC;
2490 wk->tries = 0; 1967 wk->chan = req->bss->channel;
2491 wk->timeout = jiffies; /* run right away */ 1968 wk->sdata = sdata;
1969 wk->done = ieee80211_assoc_done;
2492 1970
2493 if (req->use_mfp) { 1971 if (req->use_mfp) {
2494 ifmgd->mfp = IEEE80211_MFP_REQUIRED; 1972 ifmgd->mfp = IEEE80211_MFP_REQUIRED;
@@ -2503,69 +1981,65 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2503 else 1981 else
2504 ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT; 1982 ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT;
2505 1983
2506 ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.work); 1984 ieee80211_add_work(wk);
2507 1985 return 0;
2508 err = 0;
2509
2510 out:
2511 mutex_unlock(&ifmgd->mtx);
2512 return err;
2513} 1986}
2514 1987
2515int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, 1988int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2516 struct cfg80211_deauth_request *req, 1989 struct cfg80211_deauth_request *req,
2517 void *cookie) 1990 void *cookie)
2518{ 1991{
1992 struct ieee80211_local *local = sdata->local;
2519 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1993 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2520 struct ieee80211_mgd_work *wk; 1994 struct ieee80211_work *wk;
2521 const u8 *bssid = NULL; 1995 const u8 *bssid = req->bss->bssid;
2522 bool not_auth_yet = false;
2523 1996
2524 mutex_lock(&ifmgd->mtx); 1997 mutex_lock(&ifmgd->mtx);
2525 1998
2526 if (ifmgd->associated && &ifmgd->associated->cbss == req->bss) { 1999 if (ifmgd->associated == req->bss) {
2527 bssid = req->bss->bssid; 2000 bssid = req->bss->bssid;
2528 ieee80211_set_disassoc(sdata, true); 2001 ieee80211_set_disassoc(sdata);
2529 } else list_for_each_entry(wk, &ifmgd->work_list, list) {
2530 if (&wk->bss->cbss == req->bss) {
2531 bssid = req->bss->bssid;
2532 if (wk->state == IEEE80211_MGD_STATE_PROBE)
2533 not_auth_yet = true;
2534 list_del(&wk->list);
2535 kfree(wk);
2536 break;
2537 }
2538 }
2539
2540 /*
2541 * If somebody requests authentication and we haven't
2542 * sent out an auth frame yet there's no need to send
2543 * out a deauth frame either. If the state was PROBE,
2544 * then this is the case. If it's AUTH we have sent a
2545 * frame, and if it's IDLE we have completed the auth
2546 * process already.
2547 */
2548 if (not_auth_yet) {
2549 mutex_unlock(&ifmgd->mtx); 2002 mutex_unlock(&ifmgd->mtx);
2550 __cfg80211_auth_canceled(sdata->dev, bssid); 2003 } else {
2551 return 0; 2004 bool not_auth_yet = false;
2552 }
2553 2005
2554 /*
2555 * cfg80211 should catch this ... but it's racy since
2556 * we can receive a deauth frame, process it, hand it
2557 * to cfg80211 while that's in a locked section already
2558 * trying to tell us that the user wants to disconnect.
2559 */
2560 if (!bssid) {
2561 mutex_unlock(&ifmgd->mtx); 2006 mutex_unlock(&ifmgd->mtx);
2562 return -ENOLINK;
2563 }
2564 2007
2565 mutex_unlock(&ifmgd->mtx); 2008 mutex_lock(&local->work_mtx);
2009 list_for_each_entry(wk, &local->work_list, list) {
2010 if (wk->sdata != sdata)
2011 continue;
2012
2013 if (wk->type != IEEE80211_WORK_DIRECT_PROBE &&
2014 wk->type != IEEE80211_WORK_AUTH)
2015 continue;
2016
2017 if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN))
2018 continue;
2019
2020 not_auth_yet = wk->type == IEEE80211_WORK_DIRECT_PROBE;
2021 list_del_rcu(&wk->list);
2022 free_work(wk);
2023 break;
2024 }
2025 mutex_unlock(&local->work_mtx);
2026
2027 /*
2028 * If somebody requests authentication and we haven't
2029 * sent out an auth frame yet there's no need to send
2030 * out a deauth frame either. If the state was PROBE,
2031 * then this is the case. If it's AUTH we have sent a
2032 * frame, and if it's IDLE we have completed the auth
2033 * process already.
2034 */
2035 if (not_auth_yet) {
2036 __cfg80211_auth_canceled(sdata->dev, bssid);
2037 return 0;
2038 }
2039 }
2566 2040
2567 printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n", 2041 printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n",
2568 sdata->dev->name, bssid, req->reason_code); 2042 sdata->name, bssid, req->reason_code);
2569 2043
2570 ieee80211_send_deauth_disassoc(sdata, bssid, 2044 ieee80211_send_deauth_disassoc(sdata, bssid,
2571 IEEE80211_STYPE_DEAUTH, req->reason_code, 2045 IEEE80211_STYPE_DEAUTH, req->reason_code,
@@ -2590,15 +2064,15 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2590 * to cfg80211 while that's in a locked section already 2064 * to cfg80211 while that's in a locked section already
2591 * trying to tell us that the user wants to disconnect. 2065 * trying to tell us that the user wants to disconnect.
2592 */ 2066 */
2593 if (&ifmgd->associated->cbss != req->bss) { 2067 if (ifmgd->associated != req->bss) {
2594 mutex_unlock(&ifmgd->mtx); 2068 mutex_unlock(&ifmgd->mtx);
2595 return -ENOLINK; 2069 return -ENOLINK;
2596 } 2070 }
2597 2071
2598 printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n", 2072 printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n",
2599 sdata->dev->name, req->bss->bssid, req->reason_code); 2073 sdata->name, req->bss->bssid, req->reason_code);
2600 2074
2601 ieee80211_set_disassoc(sdata, false); 2075 ieee80211_set_disassoc(sdata);
2602 2076
2603 mutex_unlock(&ifmgd->mtx); 2077 mutex_unlock(&ifmgd->mtx);
2604 2078
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
new file mode 100644
index 00000000000..c36b1911987
--- /dev/null
+++ b/net/mac80211/offchannel.c
@@ -0,0 +1,170 @@
1/*
2 * Off-channel operation helpers
3 *
4 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
5 * Copyright 2004, Instant802 Networks, Inc.
6 * Copyright 2005, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15#include <net/mac80211.h>
16#include "ieee80211_i.h"
17
18/*
19 * inform AP that we will go to sleep so that it will buffer the frames
20 * while we scan
21 */
22static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
23{
24 struct ieee80211_local *local = sdata->local;
25
26 local->offchannel_ps_enabled = false;
27
28 /* FIXME: what to do when local->pspolling is true? */
29
30 del_timer_sync(&local->dynamic_ps_timer);
31 cancel_work_sync(&local->dynamic_ps_enable_work);
32
33 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
34 local->offchannel_ps_enabled = true;
35 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
36 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
37 }
38
39 if (!(local->offchannel_ps_enabled) ||
40 !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
41 /*
42 * If power save was enabled, no need to send a nullfunc
43 * frame because AP knows that we are sleeping. But if the
44 * hardware is creating the nullfunc frame for power save
45 * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not
46 * enabled) and power save was enabled, the firmware just
47 * sent a null frame with power save disabled. So we need
48 * to send a new nullfunc frame to inform the AP that we
49 * are again sleeping.
50 */
51 ieee80211_send_nullfunc(local, sdata, 1);
52}
53
54/* inform AP that we are awake again, unless power save is enabled */
55static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
56{
57 struct ieee80211_local *local = sdata->local;
58
59 if (!local->ps_sdata)
60 ieee80211_send_nullfunc(local, sdata, 0);
61 else if (local->offchannel_ps_enabled) {
62 /*
63 * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
64 * will send a nullfunc frame with the powersave bit set
65 * even though the AP already knows that we are sleeping.
66 * This could be avoided by sending a null frame with power
67 * save bit disabled before enabling the power save, but
68 * this doesn't gain anything.
69 *
70 * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need
71 * to send a nullfunc frame because AP already knows that
72 * we are sleeping, let's just enable power save mode in
73 * hardware.
74 */
75 local->hw.conf.flags |= IEEE80211_CONF_PS;
76 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
77 } else if (local->hw.conf.dynamic_ps_timeout > 0) {
78 /*
79 * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
80 * had been running before leaving the operating channel,
81 * restart the timer now and send a nullfunc frame to inform
82 * the AP that we are awake.
83 */
84 ieee80211_send_nullfunc(local, sdata, 0);
85 mod_timer(&local->dynamic_ps_timer, jiffies +
86 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
87 }
88}
89
90void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
91{
92 struct ieee80211_sub_if_data *sdata;
93
94 mutex_lock(&local->iflist_mtx);
95 list_for_each_entry(sdata, &local->interfaces, list) {
96 if (!ieee80211_sdata_running(sdata))
97 continue;
98
99 /* disable beaconing */
100 if (sdata->vif.type == NL80211_IFTYPE_AP ||
101 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
102 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
103 ieee80211_bss_info_change_notify(
104 sdata, BSS_CHANGED_BEACON_ENABLED);
105
106 /*
107 * only handle non-STA interfaces here, STA interfaces
108 * are handled in ieee80211_offchannel_stop_station(),
109 * e.g., from the background scan state machine.
110 *
111 * In addition, do not stop monitor interface to allow it to be
112 * used from user space controlled off-channel operations.
113 */
114 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
115 sdata->vif.type != NL80211_IFTYPE_MONITOR)
116 netif_tx_stop_all_queues(sdata->dev);
117 }
118 mutex_unlock(&local->iflist_mtx);
119}
120
121void ieee80211_offchannel_stop_station(struct ieee80211_local *local)
122{
123 struct ieee80211_sub_if_data *sdata;
124
125 /*
126 * notify the AP about us leaving the channel and stop all STA interfaces
127 */
128 mutex_lock(&local->iflist_mtx);
129 list_for_each_entry(sdata, &local->interfaces, list) {
130 if (!ieee80211_sdata_running(sdata))
131 continue;
132
133 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
134 netif_tx_stop_all_queues(sdata->dev);
135 if (sdata->u.mgd.associated)
136 ieee80211_offchannel_ps_enable(sdata);
137 }
138 }
139 mutex_unlock(&local->iflist_mtx);
140}
141
142void ieee80211_offchannel_return(struct ieee80211_local *local,
143 bool enable_beaconing)
144{
145 struct ieee80211_sub_if_data *sdata;
146
147 mutex_lock(&local->iflist_mtx);
148 list_for_each_entry(sdata, &local->interfaces, list) {
149 if (!ieee80211_sdata_running(sdata))
150 continue;
151
152 /* Tell AP we're back */
153 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
154 if (sdata->u.mgd.associated)
155 ieee80211_offchannel_ps_disable(sdata);
156 }
157
158 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
159 netif_tx_wake_all_queues(sdata->dev);
160
161 /* re-enable beaconing */
162 if (enable_beaconing &&
163 (sdata->vif.type == NL80211_IFTYPE_AP ||
164 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
165 sdata->vif.type == NL80211_IFTYPE_MESH_POINT))
166 ieee80211_bss_info_change_notify(
167 sdata, BSS_CHANGED_BEACON_ENABLED);
168 }
169 mutex_unlock(&local->iflist_mtx);
170}
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index e535f1c988f..0e64484e861 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -10,9 +10,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
10{ 10{
11 struct ieee80211_local *local = hw_to_local(hw); 11 struct ieee80211_local *local = hw_to_local(hw);
12 struct ieee80211_sub_if_data *sdata; 12 struct ieee80211_sub_if_data *sdata;
13 struct ieee80211_if_init_conf conf;
14 struct sta_info *sta; 13 struct sta_info *sta;
15 unsigned long flags;
16 14
17 ieee80211_scan_cancel(local); 15 ieee80211_scan_cancel(local);
18 16
@@ -56,22 +54,21 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
56 rcu_read_unlock(); 54 rcu_read_unlock();
57 55
58 /* remove STAs */ 56 /* remove STAs */
59 spin_lock_irqsave(&local->sta_lock, flags); 57 mutex_lock(&local->sta_mtx);
60 list_for_each_entry(sta, &local->sta_list, list) { 58 list_for_each_entry(sta, &local->sta_list, list) {
61 if (local->ops->sta_notify) { 59 if (sta->uploaded) {
62 sdata = sta->sdata; 60 sdata = sta->sdata;
63 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 61 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
64 sdata = container_of(sdata->bss, 62 sdata = container_of(sdata->bss,
65 struct ieee80211_sub_if_data, 63 struct ieee80211_sub_if_data,
66 u.ap); 64 u.ap);
67 65
68 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_REMOVE, 66 drv_sta_remove(local, sdata, &sta->sta);
69 &sta->sta);
70 } 67 }
71 68
72 mesh_plink_quiesce(sta); 69 mesh_plink_quiesce(sta);
73 } 70 }
74 spin_unlock_irqrestore(&local->sta_lock, flags); 71 mutex_unlock(&local->sta_mtx);
75 72
76 /* remove all interfaces */ 73 /* remove all interfaces */
77 list_for_each_entry(sdata, &local->interfaces, list) { 74 list_for_each_entry(sdata, &local->interfaces, list) {
@@ -93,17 +90,14 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
93 break; 90 break;
94 } 91 }
95 92
96 if (!netif_running(sdata->dev)) 93 if (!ieee80211_sdata_running(sdata))
97 continue; 94 continue;
98 95
99 /* disable beaconing */ 96 /* disable beaconing */
100 ieee80211_bss_info_change_notify(sdata, 97 ieee80211_bss_info_change_notify(sdata,
101 BSS_CHANGED_BEACON_ENABLED); 98 BSS_CHANGED_BEACON_ENABLED);
102 99
103 conf.vif = &sdata->vif; 100 drv_remove_interface(local, &sdata->vif);
104 conf.type = sdata->vif.type;
105 conf.mac_addr = sdata->dev->dev_addr;
106 drv_remove_interface(local, &conf);
107 } 101 }
108 102
109 /* stop hardware - this must stop RX */ 103 /* stop hardware - this must stop RX */
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 12a2bff7dcd..0b299d236fa 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -145,7 +145,7 @@ static const struct file_operations rcname_ops = {
145}; 145};
146#endif 146#endif
147 147
148struct rate_control_ref *rate_control_alloc(const char *name, 148static struct rate_control_ref *rate_control_alloc(const char *name,
149 struct ieee80211_local *local) 149 struct ieee80211_local *local)
150{ 150{
151 struct dentry *debugfsdir = NULL; 151 struct dentry *debugfsdir = NULL;
@@ -207,6 +207,27 @@ static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc)
207 return ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc)); 207 return ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc));
208} 208}
209 209
210static void rc_send_low_broadcast(s8 *idx, u32 basic_rates, u8 max_rate_idx)
211{
212 u8 i;
213
214 if (basic_rates == 0)
215 return; /* assume basic rates unknown and accept rate */
216 if (*idx < 0)
217 return;
218 if (basic_rates & (1 << *idx))
219 return; /* selected rate is a basic rate */
220
221 for (i = *idx + 1; i <= max_rate_idx; i++) {
222 if (basic_rates & (1 << i)) {
223 *idx = i;
224 return;
225 }
226 }
227
228 /* could not find a basic rate; use original selection */
229}
230
210bool rate_control_send_low(struct ieee80211_sta *sta, 231bool rate_control_send_low(struct ieee80211_sta *sta,
211 void *priv_sta, 232 void *priv_sta,
212 struct ieee80211_tx_rate_control *txrc) 233 struct ieee80211_tx_rate_control *txrc)
@@ -218,12 +239,48 @@ bool rate_control_send_low(struct ieee80211_sta *sta,
218 info->control.rates[0].count = 239 info->control.rates[0].count =
219 (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 240 (info->flags & IEEE80211_TX_CTL_NO_ACK) ?
220 1 : txrc->hw->max_rate_tries; 241 1 : txrc->hw->max_rate_tries;
242 if (!sta && txrc->ap)
243 rc_send_low_broadcast(&info->control.rates[0].idx,
244 txrc->bss_conf->basic_rates,
245 txrc->sband->n_bitrates);
221 return true; 246 return true;
222 } 247 }
223 return false; 248 return false;
224} 249}
225EXPORT_SYMBOL(rate_control_send_low); 250EXPORT_SYMBOL(rate_control_send_low);
226 251
252static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
253 int n_bitrates, u32 mask)
254{
255 int j;
256
257 /* See whether the selected rate or anything below it is allowed. */
258 for (j = rate->idx; j >= 0; j--) {
259 if (mask & (1 << j)) {
260 /* Okay, found a suitable rate. Use it. */
261 rate->idx = j;
262 return;
263 }
264 }
265
266 /* Try to find a higher rate that would be allowed */
267 for (j = rate->idx + 1; j < n_bitrates; j++) {
268 if (mask & (1 << j)) {
269 /* Okay, found a suitable rate. Use it. */
270 rate->idx = j;
271 return;
272 }
273 }
274
275 /*
276 * Uh.. No suitable rate exists. This should not really happen with
277 * sane TX rate mask configurations. However, should someone manage to
278 * configure supported rates and TX rate mask in incompatible way,
279 * allow the frame to be transmitted with whatever the rate control
280 * selected.
281 */
282}
283
227void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, 284void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
228 struct sta_info *sta, 285 struct sta_info *sta,
229 struct ieee80211_tx_rate_control *txrc) 286 struct ieee80211_tx_rate_control *txrc)
@@ -233,6 +290,7 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
233 struct ieee80211_sta *ista = NULL; 290 struct ieee80211_sta *ista = NULL;
234 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); 291 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
235 int i; 292 int i;
293 u32 mask;
236 294
237 if (sta) { 295 if (sta) {
238 ista = &sta->sta; 296 ista = &sta->sta;
@@ -248,23 +306,31 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
248 if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) 306 if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
249 return; 307 return;
250 308
251 if (sta && sdata->force_unicast_rateidx > -1) { 309 ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
252 info->control.rates[0].idx = sdata->force_unicast_rateidx;
253 } else {
254 ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
255 info->flags |= IEEE80211_TX_INTFL_RCALGO;
256 }
257 310
258 /* 311 /*
259 * try to enforce the maximum rate the user wanted 312 * Try to enforce the rateidx mask the user wanted. skip this if the
313 * default mask (allow all rates) is used to save some processing for
314 * the common case.
260 */ 315 */
261 if (sdata->max_ratectrl_rateidx > -1) 316 mask = sdata->rc_rateidx_mask[info->band];
317 if (mask != (1 << txrc->sband->n_bitrates) - 1) {
318 if (sta) {
319 /* Filter out rates that the STA does not support */
320 mask &= sta->sta.supp_rates[info->band];
321 }
322 /*
323 * Make sure the rate index selected for each TX rate is
324 * included in the configured mask and change the rate indexes
325 * if needed.
326 */
262 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { 327 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
328 /* Rate masking supports only legacy rates for now */
263 if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS) 329 if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS)
264 continue; 330 continue;
265 info->control.rates[i].idx = 331 rate_idx_match_mask(&info->control.rates[i],
266 min_t(s8, info->control.rates[i].idx, 332 txrc->sband->n_bitrates, mask);
267 sdata->max_ratectrl_rateidx); 333 }
268 } 334 }
269 335
270 BUG_ON(info->control.rates[0].idx < 0); 336 BUG_ON(info->control.rates[0].idx < 0);
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index cb9bd1f65e2..b6108bca96d 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -26,10 +26,6 @@ struct rate_control_ref {
26 struct kref kref; 26 struct kref kref;
27}; 27};
28 28
29/* Get a reference to the rate control algorithm. If `name' is NULL, get the
30 * first available algorithm. */
31struct rate_control_ref *rate_control_alloc(const char *name,
32 struct ieee80211_local *local);
33void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, 29void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
34 struct sta_info *sta, 30 struct sta_info *sta,
35 struct ieee80211_tx_rate_control *txrc); 31 struct ieee80211_tx_rate_control *txrc);
@@ -44,10 +40,11 @@ static inline void rate_control_tx_status(struct ieee80211_local *local,
44 struct rate_control_ref *ref = local->rate_ctrl; 40 struct rate_control_ref *ref = local->rate_ctrl;
45 struct ieee80211_sta *ista = &sta->sta; 41 struct ieee80211_sta *ista = &sta->sta;
46 void *priv_sta = sta->rate_ctrl_priv; 42 void *priv_sta = sta->rate_ctrl_priv;
47 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
48 43
49 if (likely(info->flags & IEEE80211_TX_INTFL_RCALGO)) 44 if (!ref)
50 ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb); 45 return;
46
47 ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
51} 48}
52 49
53 50
@@ -115,7 +112,8 @@ static inline void rate_control_remove_sta_debugfs(struct sta_info *sta)
115#endif 112#endif
116} 113}
117 114
118/* functions for rate control related to a device */ 115/* Get a reference to the rate control algorithm. If `name' is NULL, get the
116 * first available algorithm. */
119int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, 117int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
120 const char *name); 118 const char *name);
121void rate_control_deinitialize(struct ieee80211_local *local); 119void rate_control_deinitialize(struct ieee80211_local *local);
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index 29bc4c51623..2652a374974 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -157,9 +157,7 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
157 157
158 /* In case nothing happened during the previous control interval, turn 158 /* In case nothing happened during the previous control interval, turn
159 * the sharpening factor on. */ 159 * the sharpening factor on. */
160 period = (HZ * pinfo->sampling_period + 500) / 1000; 160 period = msecs_to_jiffies(pinfo->sampling_period);
161 if (!period)
162 period = 1;
163 if (jiffies - spinfo->last_sample > 2 * period) 161 if (jiffies - spinfo->last_sample > 2 * period)
164 spinfo->sharp_cnt = pinfo->sharpen_duration; 162 spinfo->sharp_cnt = pinfo->sharpen_duration;
165 163
@@ -252,9 +250,7 @@ static void rate_control_pid_tx_status(void *priv, struct ieee80211_supported_ba
252 } 250 }
253 251
254 /* Update PID controller state. */ 252 /* Update PID controller state. */
255 period = (HZ * pinfo->sampling_period + 500) / 1000; 253 period = msecs_to_jiffies(pinfo->sampling_period);
256 if (!period)
257 period = 1;
258 if (time_after(jiffies, spinfo->last_sample + period)) 254 if (time_after(jiffies, spinfo->last_sample + period))
259 rate_control_pid_sample(pinfo, sband, sta, spinfo); 255 rate_control_pid_sample(pinfo, sband, sta, spinfo);
260} 256}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 82a30c1bf3a..c9755f3d986 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -283,15 +283,15 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
283 skb->protocol = htons(ETH_P_802_2); 283 skb->protocol = htons(ETH_P_802_2);
284 284
285 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 285 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
286 if (!netif_running(sdata->dev))
287 continue;
288
289 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) 286 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
290 continue; 287 continue;
291 288
292 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) 289 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
293 continue; 290 continue;
294 291
292 if (!ieee80211_sdata_running(sdata))
293 continue;
294
295 if (prev_dev) { 295 if (prev_dev) {
296 skb2 = skb_clone(skb, GFP_ATOMIC); 296 skb2 = skb_clone(skb, GFP_ATOMIC);
297 if (skb2) { 297 if (skb2) {
@@ -361,7 +361,9 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
361 * boundary. In the case of regular frames, this simply means aligning the 361 * boundary. In the case of regular frames, this simply means aligning the
362 * payload to a four-byte boundary (because either the IP header is directly 362 * payload to a four-byte boundary (because either the IP header is directly
363 * contained, or IV/RFC1042 headers that have a length divisible by four are 363 * contained, or IV/RFC1042 headers that have a length divisible by four are
364 * in front of it). 364 * in front of it). If the payload data is not properly aligned and the
365 * architecture doesn't support efficient unaligned operations, mac80211
366 * will align the data.
365 * 367 *
366 * With A-MSDU frames, however, the payload data address must yield two modulo 368 * With A-MSDU frames, however, the payload data address must yield two modulo
367 * four because there are 14-byte 802.3 headers within the A-MSDU frames that 369 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
@@ -375,25 +377,10 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
375 */ 377 */
376static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx) 378static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
377{ 379{
378 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 380#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
379 int hdrlen; 381 WARN_ONCE((unsigned long)rx->skb->data & 1,
380 382 "unaligned packet at 0x%p\n", rx->skb->data);
381#ifndef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
382 return;
383#endif 383#endif
384
385 if (WARN_ONCE((unsigned long)rx->skb->data & 1,
386 "unaligned packet at 0x%p\n", rx->skb->data))
387 return;
388
389 if (!ieee80211_is_data_present(hdr->frame_control))
390 return;
391
392 hdrlen = ieee80211_hdrlen(hdr->frame_control);
393 if (rx->flags & IEEE80211_RX_AMSDU)
394 hdrlen += ETH_HLEN;
395 WARN_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3,
396 "unaligned IP payload at 0x%p\n", rx->skb->data + hdrlen);
397} 384}
398 385
399 386
@@ -476,7 +463,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
476{ 463{
477 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 464 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
478 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); 465 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
479 char *dev_addr = rx->sdata->dev->dev_addr; 466 char *dev_addr = rx->sdata->vif.addr;
480 467
481 if (ieee80211_is_data(hdr->frame_control)) { 468 if (ieee80211_is_data(hdr->frame_control)) {
482 if (is_multicast_ether_addr(hdr->addr1)) { 469 if (is_multicast_ether_addr(hdr->addr1)) {
@@ -1021,10 +1008,10 @@ static void ap_sta_ps_start(struct sta_info *sta)
1021 1008
1022 atomic_inc(&sdata->bss->num_sta_ps); 1009 atomic_inc(&sdata->bss->num_sta_ps);
1023 set_sta_flags(sta, WLAN_STA_PS_STA); 1010 set_sta_flags(sta, WLAN_STA_PS_STA);
1024 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta); 1011 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1025#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1012#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1026 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n", 1013 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
1027 sdata->dev->name, sta->sta.addr, sta->sta.aid); 1014 sdata->name, sta->sta.addr, sta->sta.aid);
1028#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1015#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1029} 1016}
1030 1017
@@ -1038,13 +1025,13 @@ static void ap_sta_ps_end(struct sta_info *sta)
1038 1025
1039#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1026#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1040 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n", 1027 printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
1041 sdata->dev->name, sta->sta.addr, sta->sta.aid); 1028 sdata->name, sta->sta.addr, sta->sta.aid);
1042#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1029#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1043 1030
1044 if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) { 1031 if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) {
1045#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1032#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1046 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n", 1033 printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
1047 sdata->dev->name, sta->sta.addr, sta->sta.aid); 1034 sdata->name, sta->sta.addr, sta->sta.aid);
1048#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1035#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1049 return; 1036 return;
1050 } 1037 }
@@ -1124,6 +1111,18 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1124 if (ieee80211_is_nullfunc(hdr->frame_control) || 1111 if (ieee80211_is_nullfunc(hdr->frame_control) ||
1125 ieee80211_is_qos_nullfunc(hdr->frame_control)) { 1112 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1126 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); 1113 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1114
1115 /*
1116 * If we receive a 4-addr nullfunc frame from a STA
1117 * that was not moved to a 4-addr STA vlan yet, drop
1118 * the frame to the monitor interface, to make sure
1119 * that hostapd sees it
1120 */
1121 if (ieee80211_has_a4(hdr->frame_control) &&
1122 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1123 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1124 !rx->sdata->u.vlan.sta)))
1125 return RX_DROP_MONITOR;
1127 /* 1126 /*
1128 * Update counter and free packet here to avoid 1127 * Update counter and free packet here to avoid
1129 * counting this as a dropped packed. 1128 * counting this as a dropped packed.
@@ -1156,7 +1155,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
1156 printk(KERN_DEBUG "%s: RX reassembly removed oldest " 1155 printk(KERN_DEBUG "%s: RX reassembly removed oldest "
1157 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d " 1156 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
1158 "addr1=%pM addr2=%pM\n", 1157 "addr1=%pM addr2=%pM\n",
1159 sdata->dev->name, idx, 1158 sdata->name, idx,
1160 jiffies - entry->first_frag_time, entry->seq, 1159 jiffies - entry->first_frag_time, entry->seq,
1161 entry->last_frag, hdr->addr1, hdr->addr2); 1160 entry->last_frag, hdr->addr1, hdr->addr2);
1162#endif 1161#endif
@@ -1424,7 +1423,6 @@ static int
1424__ieee80211_data_to_8023(struct ieee80211_rx_data *rx) 1423__ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1425{ 1424{
1426 struct ieee80211_sub_if_data *sdata = rx->sdata; 1425 struct ieee80211_sub_if_data *sdata = rx->sdata;
1427 struct net_device *dev = sdata->dev;
1428 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1426 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1429 1427
1430 if (ieee80211_has_a4(hdr->frame_control) && 1428 if (ieee80211_has_a4(hdr->frame_control) &&
@@ -1436,7 +1434,7 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1436 (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr))) 1434 (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr)))
1437 return -1; 1435 return -1;
1438 1436
1439 return ieee80211_data_to_8023(rx->skb, dev->dev_addr, sdata->vif.type); 1437 return ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
1440} 1438}
1441 1439
1442/* 1440/*
@@ -1453,7 +1451,7 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1453 * of whether the frame was encrypted or not. 1451 * of whether the frame was encrypted or not.
1454 */ 1452 */
1455 if (ehdr->h_proto == htons(ETH_P_PAE) && 1453 if (ehdr->h_proto == htons(ETH_P_PAE) &&
1456 (compare_ether_addr(ehdr->h_dest, rx->sdata->dev->dev_addr) == 0 || 1454 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
1457 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0)) 1455 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1458 return true; 1456 return true;
1459 1457
@@ -1472,7 +1470,6 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1472{ 1470{
1473 struct ieee80211_sub_if_data *sdata = rx->sdata; 1471 struct ieee80211_sub_if_data *sdata = rx->sdata;
1474 struct net_device *dev = sdata->dev; 1472 struct net_device *dev = sdata->dev;
1475 struct ieee80211_local *local = rx->local;
1476 struct sk_buff *skb, *xmit_skb; 1473 struct sk_buff *skb, *xmit_skb;
1477 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 1474 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
1478 struct sta_info *dsta; 1475 struct sta_info *dsta;
@@ -1495,8 +1492,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1495 printk(KERN_DEBUG "%s: failed to clone " 1492 printk(KERN_DEBUG "%s: failed to clone "
1496 "multicast frame\n", dev->name); 1493 "multicast frame\n", dev->name);
1497 } else { 1494 } else {
1498 dsta = sta_info_get(local, skb->data); 1495 dsta = sta_info_get(sdata, skb->data);
1499 if (dsta && dsta->sdata->dev == dev) { 1496 if (dsta) {
1500 /* 1497 /*
1501 * The destination station is associated to 1498 * The destination station is associated to
1502 * this AP (in this VLAN), so send the frame 1499 * this AP (in this VLAN), so send the frame
@@ -1512,7 +1509,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1512 if (skb) { 1509 if (skb) {
1513 int align __maybe_unused; 1510 int align __maybe_unused;
1514 1511
1515#if defined(CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT) || !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 1512#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1516 /* 1513 /*
1517 * 'align' will only take the values 0 or 2 here 1514 * 'align' will only take the values 0 or 2 here
1518 * since all frames are required to be aligned 1515 * since all frames are required to be aligned
@@ -1556,16 +1553,10 @@ static ieee80211_rx_result debug_noinline
1556ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) 1553ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1557{ 1554{
1558 struct net_device *dev = rx->sdata->dev; 1555 struct net_device *dev = rx->sdata->dev;
1559 struct ieee80211_local *local = rx->local; 1556 struct sk_buff *skb = rx->skb;
1560 u16 ethertype;
1561 u8 *payload;
1562 struct sk_buff *skb = rx->skb, *frame = NULL;
1563 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1557 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1564 __le16 fc = hdr->frame_control; 1558 __le16 fc = hdr->frame_control;
1565 const struct ethhdr *eth; 1559 struct sk_buff_head frame_list;
1566 int remaining, err;
1567 u8 dst[ETH_ALEN];
1568 u8 src[ETH_ALEN];
1569 1560
1570 if (unlikely(!ieee80211_is_data(fc))) 1561 if (unlikely(!ieee80211_is_data(fc)))
1571 return RX_CONTINUE; 1562 return RX_CONTINUE;
@@ -1576,94 +1567,34 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1576 if (!(rx->flags & IEEE80211_RX_AMSDU)) 1567 if (!(rx->flags & IEEE80211_RX_AMSDU))
1577 return RX_CONTINUE; 1568 return RX_CONTINUE;
1578 1569
1579 err = __ieee80211_data_to_8023(rx); 1570 if (ieee80211_has_a4(hdr->frame_control) &&
1580 if (unlikely(err)) 1571 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1572 !rx->sdata->u.vlan.sta)
1581 return RX_DROP_UNUSABLE; 1573 return RX_DROP_UNUSABLE;
1582 1574
1583 skb->dev = dev; 1575 if (is_multicast_ether_addr(hdr->addr1) &&
1584 1576 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1585 dev->stats.rx_packets++; 1577 rx->sdata->u.vlan.sta) ||
1586 dev->stats.rx_bytes += skb->len; 1578 (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1587 1579 rx->sdata->u.mgd.use_4addr)))
1588 /* skip the wrapping header */
1589 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
1590 if (!eth)
1591 return RX_DROP_UNUSABLE; 1580 return RX_DROP_UNUSABLE;
1592 1581
1593 while (skb != frame) { 1582 skb->dev = dev;
1594 u8 padding; 1583 __skb_queue_head_init(&frame_list);
1595 __be16 len = eth->h_proto;
1596 unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
1597
1598 remaining = skb->len;
1599 memcpy(dst, eth->h_dest, ETH_ALEN);
1600 memcpy(src, eth->h_source, ETH_ALEN);
1601
1602 padding = ((4 - subframe_len) & 0x3);
1603 /* the last MSDU has no padding */
1604 if (subframe_len > remaining)
1605 return RX_DROP_UNUSABLE;
1606 1584
1607 skb_pull(skb, sizeof(struct ethhdr)); 1585 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
1608 /* if last subframe reuse skb */ 1586 rx->sdata->vif.type,
1609 if (remaining <= subframe_len + padding) 1587 rx->local->hw.extra_tx_headroom);
1610 frame = skb;
1611 else {
1612 /*
1613 * Allocate and reserve two bytes more for payload
1614 * alignment since sizeof(struct ethhdr) is 14.
1615 */
1616 frame = dev_alloc_skb(
1617 ALIGN(local->hw.extra_tx_headroom, 4) +
1618 subframe_len + 2);
1619
1620 if (frame == NULL)
1621 return RX_DROP_UNUSABLE;
1622
1623 skb_reserve(frame,
1624 ALIGN(local->hw.extra_tx_headroom, 4) +
1625 sizeof(struct ethhdr) + 2);
1626 memcpy(skb_put(frame, ntohs(len)), skb->data,
1627 ntohs(len));
1628
1629 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
1630 padding);
1631 if (!eth) {
1632 dev_kfree_skb(frame);
1633 return RX_DROP_UNUSABLE;
1634 }
1635 }
1636 1588
1637 skb_reset_network_header(frame); 1589 while (!skb_queue_empty(&frame_list)) {
1638 frame->dev = dev; 1590 rx->skb = __skb_dequeue(&frame_list);
1639 frame->priority = skb->priority;
1640 rx->skb = frame;
1641
1642 payload = frame->data;
1643 ethertype = (payload[6] << 8) | payload[7];
1644
1645 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
1646 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
1647 compare_ether_addr(payload,
1648 bridge_tunnel_header) == 0)) {
1649 /* remove RFC1042 or Bridge-Tunnel
1650 * encapsulation and replace EtherType */
1651 skb_pull(frame, 6);
1652 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1653 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1654 } else {
1655 memcpy(skb_push(frame, sizeof(__be16)),
1656 &len, sizeof(__be16));
1657 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
1658 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1659 }
1660 1591
1661 if (!ieee80211_frame_allowed(rx, fc)) { 1592 if (!ieee80211_frame_allowed(rx, fc)) {
1662 if (skb == frame) /* last frame */ 1593 dev_kfree_skb(rx->skb);
1663 return RX_DROP_UNUSABLE;
1664 dev_kfree_skb(frame);
1665 continue; 1594 continue;
1666 } 1595 }
1596 dev->stats.rx_packets++;
1597 dev->stats.rx_bytes += rx->skb->len;
1667 1598
1668 ieee80211_deliver_skb(rx); 1599 ieee80211_deliver_skb(rx);
1669 } 1600 }
@@ -1721,7 +1652,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1721 1652
1722 /* Frame has reached destination. Don't forward */ 1653 /* Frame has reached destination. Don't forward */
1723 if (!is_multicast_ether_addr(hdr->addr1) && 1654 if (!is_multicast_ether_addr(hdr->addr1) &&
1724 compare_ether_addr(sdata->dev->dev_addr, hdr->addr3) == 0) 1655 compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
1725 return RX_CONTINUE; 1656 return RX_CONTINUE;
1726 1657
1727 mesh_hdr->ttl--; 1658 mesh_hdr->ttl--;
@@ -1738,10 +1669,10 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1738 1669
1739 if (!fwd_skb && net_ratelimit()) 1670 if (!fwd_skb && net_ratelimit())
1740 printk(KERN_DEBUG "%s: failed to clone mesh frame\n", 1671 printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
1741 sdata->dev->name); 1672 sdata->name);
1742 1673
1743 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; 1674 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
1744 memcpy(fwd_hdr->addr2, sdata->dev->dev_addr, ETH_ALEN); 1675 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
1745 info = IEEE80211_SKB_CB(fwd_skb); 1676 info = IEEE80211_SKB_CB(fwd_skb);
1746 memset(info, 0, sizeof(*info)); 1677 memset(info, 0, sizeof(*info));
1747 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 1678 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
@@ -1788,6 +1719,7 @@ static ieee80211_rx_result debug_noinline
1788ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 1719ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1789{ 1720{
1790 struct ieee80211_sub_if_data *sdata = rx->sdata; 1721 struct ieee80211_sub_if_data *sdata = rx->sdata;
1722 struct ieee80211_local *local = rx->local;
1791 struct net_device *dev = sdata->dev; 1723 struct net_device *dev = sdata->dev;
1792 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1724 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1793 __le16 fc = hdr->frame_control; 1725 __le16 fc = hdr->frame_control;
@@ -1819,6 +1751,13 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1819 dev->stats.rx_packets++; 1751 dev->stats.rx_packets++;
1820 dev->stats.rx_bytes += rx->skb->len; 1752 dev->stats.rx_bytes += rx->skb->len;
1821 1753
1754 if (ieee80211_is_data(hdr->frame_control) &&
1755 !is_multicast_ether_addr(hdr->addr1) &&
1756 local->hw.conf.dynamic_ps_timeout > 0 && local->ps_sdata) {
1757 mod_timer(&local->dynamic_ps_timer, jiffies +
1758 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
1759 }
1760
1822 ieee80211_deliver_skb(rx); 1761 ieee80211_deliver_skb(rx);
1823 1762
1824 return RX_QUEUED; 1763 return RX_QUEUED;
@@ -1872,7 +1811,7 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1872 struct sk_buff *skb; 1811 struct sk_buff *skb;
1873 struct ieee80211_mgmt *resp; 1812 struct ieee80211_mgmt *resp;
1874 1813
1875 if (compare_ether_addr(mgmt->da, sdata->dev->dev_addr) != 0) { 1814 if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) {
1876 /* Not to own unicast address */ 1815 /* Not to own unicast address */
1877 return; 1816 return;
1878 } 1817 }
@@ -1896,7 +1835,7 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1896 resp = (struct ieee80211_mgmt *) skb_put(skb, 24); 1835 resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
1897 memset(resp, 0, 24); 1836 memset(resp, 0, 24);
1898 memcpy(resp->da, mgmt->sa, ETH_ALEN); 1837 memcpy(resp->da, mgmt->sa, ETH_ALEN);
1899 memcpy(resp->sa, sdata->dev->dev_addr, ETH_ALEN); 1838 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
1900 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); 1839 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
1901 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 1840 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1902 IEEE80211_STYPE_ACTION); 1841 IEEE80211_STYPE_ACTION);
@@ -2032,6 +1971,7 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2032{ 1971{
2033 struct ieee80211_sub_if_data *sdata = rx->sdata; 1972 struct ieee80211_sub_if_data *sdata = rx->sdata;
2034 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 1973 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1974 ieee80211_rx_result rxs;
2035 1975
2036 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 1976 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
2037 return RX_DROP_MONITOR; 1977 return RX_DROP_MONITOR;
@@ -2039,6 +1979,10 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2039 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control)) 1979 if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
2040 return RX_DROP_MONITOR; 1980 return RX_DROP_MONITOR;
2041 1981
1982 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
1983 if (rxs != RX_CONTINUE)
1984 return rxs;
1985
2042 if (ieee80211_vif_is_mesh(&sdata->vif)) 1986 if (ieee80211_vif_is_mesh(&sdata->vif))
2043 return ieee80211_mesh_rx_mgmt(sdata, rx->skb); 1987 return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
2044 1988
@@ -2143,7 +2087,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2143 skb->protocol = htons(ETH_P_802_2); 2087 skb->protocol = htons(ETH_P_802_2);
2144 2088
2145 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2089 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2146 if (!netif_running(sdata->dev)) 2090 if (!ieee80211_sdata_running(sdata))
2147 continue; 2091 continue;
2148 2092
2149 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || 2093 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
@@ -2280,7 +2224,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2280 if (!bssid && !sdata->u.mgd.use_4addr) 2224 if (!bssid && !sdata->u.mgd.use_4addr)
2281 return 0; 2225 return 0;
2282 if (!multicast && 2226 if (!multicast &&
2283 compare_ether_addr(sdata->dev->dev_addr, hdr->addr1) != 0) { 2227 compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
2284 if (!(sdata->dev->flags & IFF_PROMISC)) 2228 if (!(sdata->dev->flags & IFF_PROMISC))
2285 return 0; 2229 return 0;
2286 rx->flags &= ~IEEE80211_RX_RA_MATCH; 2230 rx->flags &= ~IEEE80211_RX_RA_MATCH;
@@ -2297,7 +2241,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2297 return 0; 2241 return 0;
2298 rx->flags &= ~IEEE80211_RX_RA_MATCH; 2242 rx->flags &= ~IEEE80211_RX_RA_MATCH;
2299 } else if (!multicast && 2243 } else if (!multicast &&
2300 compare_ether_addr(sdata->dev->dev_addr, 2244 compare_ether_addr(sdata->vif.addr,
2301 hdr->addr1) != 0) { 2245 hdr->addr1) != 0) {
2302 if (!(sdata->dev->flags & IFF_PROMISC)) 2246 if (!(sdata->dev->flags & IFF_PROMISC))
2303 return 0; 2247 return 0;
@@ -2308,13 +2252,13 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2308 rate_idx = 0; /* TODO: HT rates */ 2252 rate_idx = 0; /* TODO: HT rates */
2309 else 2253 else
2310 rate_idx = status->rate_idx; 2254 rate_idx = status->rate_idx;
2311 rx->sta = ieee80211_ibss_add_sta(sdata, bssid, hdr->addr2, 2255 rx->sta = ieee80211_ibss_add_sta(sdata, bssid,
2312 BIT(rate_idx)); 2256 hdr->addr2, BIT(rate_idx), GFP_ATOMIC);
2313 } 2257 }
2314 break; 2258 break;
2315 case NL80211_IFTYPE_MESH_POINT: 2259 case NL80211_IFTYPE_MESH_POINT:
2316 if (!multicast && 2260 if (!multicast &&
2317 compare_ether_addr(sdata->dev->dev_addr, 2261 compare_ether_addr(sdata->vif.addr,
2318 hdr->addr1) != 0) { 2262 hdr->addr1) != 0) {
2319 if (!(sdata->dev->flags & IFF_PROMISC)) 2263 if (!(sdata->dev->flags & IFF_PROMISC))
2320 return 0; 2264 return 0;
@@ -2325,11 +2269,11 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2325 case NL80211_IFTYPE_AP_VLAN: 2269 case NL80211_IFTYPE_AP_VLAN:
2326 case NL80211_IFTYPE_AP: 2270 case NL80211_IFTYPE_AP:
2327 if (!bssid) { 2271 if (!bssid) {
2328 if (compare_ether_addr(sdata->dev->dev_addr, 2272 if (compare_ether_addr(sdata->vif.addr,
2329 hdr->addr1)) 2273 hdr->addr1))
2330 return 0; 2274 return 0;
2331 } else if (!ieee80211_bssid_match(bssid, 2275 } else if (!ieee80211_bssid_match(bssid,
2332 sdata->dev->dev_addr)) { 2276 sdata->vif.addr)) {
2333 if (!(rx->flags & IEEE80211_RX_IN_SCAN)) 2277 if (!(rx->flags & IEEE80211_RX_IN_SCAN))
2334 return 0; 2278 return 0;
2335 rx->flags &= ~IEEE80211_RX_RA_MATCH; 2279 rx->flags &= ~IEEE80211_RX_RA_MATCH;
@@ -2368,6 +2312,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2368 int prepares; 2312 int prepares;
2369 struct ieee80211_sub_if_data *prev = NULL; 2313 struct ieee80211_sub_if_data *prev = NULL;
2370 struct sk_buff *skb_new; 2314 struct sk_buff *skb_new;
2315 struct sta_info *sta, *tmp;
2316 bool found_sta = false;
2371 2317
2372 hdr = (struct ieee80211_hdr *)skb->data; 2318 hdr = (struct ieee80211_hdr *)skb->data;
2373 memset(&rx, 0, sizeof(rx)); 2319 memset(&rx, 0, sizeof(rx));
@@ -2384,68 +2330,87 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2384 ieee80211_parse_qos(&rx); 2330 ieee80211_parse_qos(&rx);
2385 ieee80211_verify_alignment(&rx); 2331 ieee80211_verify_alignment(&rx);
2386 2332
2387 rx.sta = sta_info_get(local, hdr->addr2); 2333 if (ieee80211_is_data(hdr->frame_control)) {
2388 if (rx.sta) 2334 for_each_sta_info(local, hdr->addr2, sta, tmp) {
2389 rx.sdata = rx.sta->sdata; 2335 rx.sta = sta;
2390 2336 found_sta = true;
2391 if (rx.sdata && ieee80211_is_data(hdr->frame_control)) { 2337 rx.sdata = sta->sdata;
2392 rx.flags |= IEEE80211_RX_RA_MATCH; 2338
2393 prepares = prepare_for_handlers(rx.sdata, &rx, hdr); 2339 rx.flags |= IEEE80211_RX_RA_MATCH;
2394 if (prepares) { 2340 prepares = prepare_for_handlers(rx.sdata, &rx, hdr);
2395 if (status->flag & RX_FLAG_MMIC_ERROR) { 2341 if (prepares) {
2396 if (rx.flags & IEEE80211_RX_RA_MATCH) 2342 if (status->flag & RX_FLAG_MMIC_ERROR) {
2397 ieee80211_rx_michael_mic_report(hdr, &rx); 2343 if (rx.flags & IEEE80211_RX_RA_MATCH)
2398 } else 2344 ieee80211_rx_michael_mic_report(hdr, &rx);
2399 prev = rx.sdata; 2345 } else
2346 prev = rx.sdata;
2347 }
2400 } 2348 }
2401 } else list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2349 }
2402 if (!netif_running(sdata->dev)) 2350 if (!found_sta) {
2403 continue; 2351 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
2352 if (!ieee80211_sdata_running(sdata))
2353 continue;
2404 2354
2405 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 2355 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
2406 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 2356 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
2407 continue; 2357 continue;
2408 2358
2409 rx.flags |= IEEE80211_RX_RA_MATCH; 2359 /*
2410 prepares = prepare_for_handlers(sdata, &rx, hdr); 2360 * frame is destined for this interface, but if it's
2361 * not also for the previous one we handle that after
2362 * the loop to avoid copying the SKB once too much
2363 */
2411 2364
2412 if (!prepares) 2365 if (!prev) {
2413 continue; 2366 prev = sdata;
2367 continue;
2368 }
2414 2369
2415 if (status->flag & RX_FLAG_MMIC_ERROR) { 2370 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2416 rx.sdata = sdata;
2417 if (rx.flags & IEEE80211_RX_RA_MATCH)
2418 ieee80211_rx_michael_mic_report(hdr, &rx);
2419 continue;
2420 }
2421 2371
2422 /* 2372 rx.flags |= IEEE80211_RX_RA_MATCH;
2423 * frame is destined for this interface, but if it's not 2373 prepares = prepare_for_handlers(prev, &rx, hdr);
2424 * also for the previous one we handle that after the 2374
2425 * loop to avoid copying the SKB once too much 2375 if (!prepares)
2426 */ 2376 goto next;
2377
2378 if (status->flag & RX_FLAG_MMIC_ERROR) {
2379 rx.sdata = prev;
2380 if (rx.flags & IEEE80211_RX_RA_MATCH)
2381 ieee80211_rx_michael_mic_report(hdr,
2382 &rx);
2383 goto next;
2384 }
2427 2385
2428 if (!prev) { 2386 /*
2387 * frame was destined for the previous interface
2388 * so invoke RX handlers for it
2389 */
2390
2391 skb_new = skb_copy(skb, GFP_ATOMIC);
2392 if (!skb_new) {
2393 if (net_ratelimit())
2394 printk(KERN_DEBUG "%s: failed to copy "
2395 "multicast frame for %s\n",
2396 wiphy_name(local->hw.wiphy),
2397 prev->name);
2398 goto next;
2399 }
2400 ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate);
2401next:
2429 prev = sdata; 2402 prev = sdata;
2430 continue;
2431 } 2403 }
2432 2404
2433 /* 2405 if (prev) {
2434 * frame was destined for the previous interface 2406 rx.sta = sta_info_get_bss(prev, hdr->addr2);
2435 * so invoke RX handlers for it
2436 */
2437 2407
2438 skb_new = skb_copy(skb, GFP_ATOMIC); 2408 rx.flags |= IEEE80211_RX_RA_MATCH;
2439 if (!skb_new) { 2409 prepares = prepare_for_handlers(prev, &rx, hdr);
2440 if (net_ratelimit()) 2410
2441 printk(KERN_DEBUG "%s: failed to copy " 2411 if (!prepares)
2442 "multicast frame for %s\n", 2412 prev = NULL;
2443 wiphy_name(local->hw.wiphy),
2444 prev->dev->name);
2445 continue;
2446 } 2413 }
2447 ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate);
2448 prev = sdata;
2449 } 2414 }
2450 if (prev) 2415 if (prev)
2451 ieee80211_invoke_rx_handlers(prev, &rx, skb, rate); 2416 ieee80211_invoke_rx_handlers(prev, &rx, skb, rate);
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index bc17cf7d68d..b822dce9786 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -12,7 +12,6 @@
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13 */ 13 */
14 14
15#include <linux/wireless.h>
16#include <linux/if_arp.h> 15#include <linux/if_arp.h>
17#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
18#include <net/mac80211.h> 17#include <net/mac80211.h>
@@ -29,16 +28,19 @@ struct ieee80211_bss *
29ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq, 28ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
30 u8 *ssid, u8 ssid_len) 29 u8 *ssid, u8 ssid_len)
31{ 30{
32 return (void *)cfg80211_get_bss(local->hw.wiphy, 31 struct cfg80211_bss *cbss;
33 ieee80211_get_channel(local->hw.wiphy, 32
34 freq), 33 cbss = cfg80211_get_bss(local->hw.wiphy,
35 bssid, ssid, ssid_len, 34 ieee80211_get_channel(local->hw.wiphy, freq),
36 0, 0); 35 bssid, ssid, ssid_len, 0, 0);
36 if (!cbss)
37 return NULL;
38 return (void *)cbss->priv;
37} 39}
38 40
39static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss) 41static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss)
40{ 42{
41 struct ieee80211_bss *bss = (void *)cbss; 43 struct ieee80211_bss *bss = (void *)cbss->priv;
42 44
43 kfree(bss_mesh_id(bss)); 45 kfree(bss_mesh_id(bss));
44 kfree(bss_mesh_cfg(bss)); 46 kfree(bss_mesh_cfg(bss));
@@ -47,7 +49,26 @@ static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss)
47void ieee80211_rx_bss_put(struct ieee80211_local *local, 49void ieee80211_rx_bss_put(struct ieee80211_local *local,
48 struct ieee80211_bss *bss) 50 struct ieee80211_bss *bss)
49{ 51{
50 cfg80211_put_bss((struct cfg80211_bss *)bss); 52 if (!bss)
53 return;
54 cfg80211_put_bss(container_of((void *)bss, struct cfg80211_bss, priv));
55}
56
57static bool is_uapsd_supported(struct ieee802_11_elems *elems)
58{
59 u8 qos_info;
60
61 if (elems->wmm_info && elems->wmm_info_len == 7
62 && elems->wmm_info[5] == 1)
63 qos_info = elems->wmm_info[6];
64 else if (elems->wmm_param && elems->wmm_param_len == 24
65 && elems->wmm_param[5] == 1)
66 qos_info = elems->wmm_param[6];
67 else
68 /* no valid wmm information or parameter element found */
69 return false;
70
71 return qos_info & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD;
51} 72}
52 73
53struct ieee80211_bss * 74struct ieee80211_bss *
@@ -59,6 +80,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
59 struct ieee80211_channel *channel, 80 struct ieee80211_channel *channel,
60 bool beacon) 81 bool beacon)
61{ 82{
83 struct cfg80211_bss *cbss;
62 struct ieee80211_bss *bss; 84 struct ieee80211_bss *bss;
63 int clen; 85 int clen;
64 s32 signal = 0; 86 s32 signal = 0;
@@ -68,13 +90,14 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
68 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) 90 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
69 signal = (rx_status->signal * 100) / local->hw.max_signal; 91 signal = (rx_status->signal * 100) / local->hw.max_signal;
70 92
71 bss = (void *)cfg80211_inform_bss_frame(local->hw.wiphy, channel, 93 cbss = cfg80211_inform_bss_frame(local->hw.wiphy, channel,
72 mgmt, len, signal, GFP_ATOMIC); 94 mgmt, len, signal, GFP_ATOMIC);
73 95
74 if (!bss) 96 if (!cbss)
75 return NULL; 97 return NULL;
76 98
77 bss->cbss.free_priv = ieee80211_rx_bss_free; 99 cbss->free_priv = ieee80211_rx_bss_free;
100 bss = (void *)cbss->priv;
78 101
79 /* save the ERP value so that it is available at association time */ 102 /* save the ERP value so that it is available at association time */
80 if (elems->erp_info && elems->erp_info_len >= 1) { 103 if (elems->erp_info && elems->erp_info_len >= 1) {
@@ -88,10 +111,6 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
88 bss->dtim_period = tim_ie->dtim_period; 111 bss->dtim_period = tim_ie->dtim_period;
89 } 112 }
90 113
91 /* set default value for buggy AP/no TIM element */
92 if (bss->dtim_period == 0)
93 bss->dtim_period = 1;
94
95 bss->supp_rates_len = 0; 114 bss->supp_rates_len = 0;
96 if (elems->supp_rates) { 115 if (elems->supp_rates) {
97 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; 116 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
@@ -111,6 +130,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
111 } 130 }
112 131
113 bss->wmm_used = elems->wmm_param || elems->wmm_info; 132 bss->wmm_used = elems->wmm_param || elems->wmm_info;
133 bss->uapsd_supported = is_uapsd_supported(elems);
114 134
115 if (!beacon) 135 if (!beacon)
116 bss->last_probe_resp = jiffies; 136 bss->last_probe_resp = jiffies;
@@ -147,7 +167,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
147 presp = ieee80211_is_probe_resp(fc); 167 presp = ieee80211_is_probe_resp(fc);
148 if (presp) { 168 if (presp) {
149 /* ignore ProbeResp to foreign address */ 169 /* ignore ProbeResp to foreign address */
150 if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) 170 if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
151 return RX_DROP_MONITOR; 171 return RX_DROP_MONITOR;
152 172
153 presp = true; 173 presp = true;
@@ -220,82 +240,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
220 return true; 240 return true;
221} 241}
222 242
223/*
224 * inform AP that we will go to sleep so that it will buffer the frames
225 * while we scan
226 */
227static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata)
228{
229 struct ieee80211_local *local = sdata->local;
230
231 local->scan_ps_enabled = false;
232
233 /* FIXME: what to do when local->pspolling is true? */
234
235 del_timer_sync(&local->dynamic_ps_timer);
236 cancel_work_sync(&local->dynamic_ps_enable_work);
237
238 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
239 local->scan_ps_enabled = true;
240 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
241 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
242 }
243
244 if (!(local->scan_ps_enabled) ||
245 !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
246 /*
247 * If power save was enabled, no need to send a nullfunc
248 * frame because AP knows that we are sleeping. But if the
249 * hardware is creating the nullfunc frame for power save
250 * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not
251 * enabled) and power save was enabled, the firmware just
252 * sent a null frame with power save disabled. So we need
253 * to send a new nullfunc frame to inform the AP that we
254 * are again sleeping.
255 */
256 ieee80211_send_nullfunc(local, sdata, 1);
257}
258
259/* inform AP that we are awake again, unless power save is enabled */
260static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
261{
262 struct ieee80211_local *local = sdata->local;
263
264 if (!local->ps_sdata)
265 ieee80211_send_nullfunc(local, sdata, 0);
266 else if (local->scan_ps_enabled) {
267 /*
268 * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
269 * will send a nullfunc frame with the powersave bit set
270 * even though the AP already knows that we are sleeping.
271 * This could be avoided by sending a null frame with power
272 * save bit disabled before enabling the power save, but
273 * this doesn't gain anything.
274 *
275 * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need
276 * to send a nullfunc frame because AP already knows that
277 * we are sleeping, let's just enable power save mode in
278 * hardware.
279 */
280 local->hw.conf.flags |= IEEE80211_CONF_PS;
281 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
282 } else if (local->hw.conf.dynamic_ps_timeout > 0) {
283 /*
284 * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
285 * had been running before leaving the operating channel,
286 * restart the timer now and send a nullfunc frame to inform
287 * the AP that we are awake.
288 */
289 ieee80211_send_nullfunc(local, sdata, 0);
290 mod_timer(&local->dynamic_ps_timer, jiffies +
291 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
292 }
293}
294
295void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) 243void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
296{ 244{
297 struct ieee80211_local *local = hw_to_local(hw); 245 struct ieee80211_local *local = hw_to_local(hw);
298 struct ieee80211_sub_if_data *sdata;
299 bool was_hw_scan; 246 bool was_hw_scan;
300 247
301 mutex_lock(&local->scan_mtx); 248 mutex_lock(&local->scan_mtx);
@@ -344,41 +291,19 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
344 291
345 drv_sw_scan_complete(local); 292 drv_sw_scan_complete(local);
346 293
347 mutex_lock(&local->iflist_mtx); 294 ieee80211_offchannel_return(local, true);
348 list_for_each_entry(sdata, &local->interfaces, list) {
349 if (!netif_running(sdata->dev))
350 continue;
351
352 /* Tell AP we're back */
353 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
354 if (sdata->u.mgd.associated) {
355 ieee80211_scan_ps_disable(sdata);
356 netif_tx_wake_all_queues(sdata->dev);
357 }
358 } else
359 netif_tx_wake_all_queues(sdata->dev);
360
361 /* re-enable beaconing */
362 if (sdata->vif.type == NL80211_IFTYPE_AP ||
363 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
364 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
365 ieee80211_bss_info_change_notify(
366 sdata, BSS_CHANGED_BEACON_ENABLED);
367 }
368 mutex_unlock(&local->iflist_mtx);
369 295
370 done: 296 done:
371 ieee80211_recalc_idle(local); 297 ieee80211_recalc_idle(local);
372 ieee80211_mlme_notify_scan_completed(local); 298 ieee80211_mlme_notify_scan_completed(local);
373 ieee80211_ibss_notify_scan_completed(local); 299 ieee80211_ibss_notify_scan_completed(local);
374 ieee80211_mesh_notify_scan_completed(local); 300 ieee80211_mesh_notify_scan_completed(local);
301 ieee80211_queue_work(&local->hw, &local->work_work);
375} 302}
376EXPORT_SYMBOL(ieee80211_scan_completed); 303EXPORT_SYMBOL(ieee80211_scan_completed);
377 304
378static int ieee80211_start_sw_scan(struct ieee80211_local *local) 305static int ieee80211_start_sw_scan(struct ieee80211_local *local)
379{ 306{
380 struct ieee80211_sub_if_data *sdata;
381
382 /* 307 /*
383 * Hardware/driver doesn't support hw_scan, so use software 308 * Hardware/driver doesn't support hw_scan, so use software
384 * scanning instead. First send a nullfunc frame with power save 309 * scanning instead. First send a nullfunc frame with power save
@@ -394,33 +319,15 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
394 */ 319 */
395 drv_sw_scan_start(local); 320 drv_sw_scan_start(local);
396 321
397 mutex_lock(&local->iflist_mtx); 322 ieee80211_offchannel_stop_beaconing(local);
398 list_for_each_entry(sdata, &local->interfaces, list) {
399 if (!netif_running(sdata->dev))
400 continue;
401
402 /* disable beaconing */
403 if (sdata->vif.type == NL80211_IFTYPE_AP ||
404 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
405 sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
406 ieee80211_bss_info_change_notify(
407 sdata, BSS_CHANGED_BEACON_ENABLED);
408
409 /*
410 * only handle non-STA interfaces here, STA interfaces
411 * are handled in the scan state machine
412 */
413 if (sdata->vif.type != NL80211_IFTYPE_STATION)
414 netif_tx_stop_all_queues(sdata->dev);
415 }
416 mutex_unlock(&local->iflist_mtx);
417 323
418 local->next_scan_state = SCAN_DECISION; 324 local->next_scan_state = SCAN_DECISION;
419 local->scan_channel_idx = 0; 325 local->scan_channel_idx = 0;
420 326
327 drv_flush(local, false);
328
421 ieee80211_configure_filter(local); 329 ieee80211_configure_filter(local);
422 330
423 /* TODO: start scan as soon as all nullfunc frames are ACKed */
424 ieee80211_queue_delayed_work(&local->hw, 331 ieee80211_queue_delayed_work(&local->hw,
425 &local->scan_work, 332 &local->scan_work,
426 IEEE80211_CHANNEL_TIME); 333 IEEE80211_CHANNEL_TIME);
@@ -433,17 +340,13 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
433 struct cfg80211_scan_request *req) 340 struct cfg80211_scan_request *req)
434{ 341{
435 struct ieee80211_local *local = sdata->local; 342 struct ieee80211_local *local = sdata->local;
436 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
437 int rc; 343 int rc;
438 344
439 if (local->scan_req) 345 if (local->scan_req)
440 return -EBUSY; 346 return -EBUSY;
441 347
442 if (req != local->int_scan_req && 348 if (!list_empty(&local->work_list)) {
443 sdata->vif.type == NL80211_IFTYPE_STATION && 349 /* wait for the work to finish/time out */
444 !list_empty(&ifmgd->work_list)) {
445 /* actually wait for the work it's doing to finish/time out */
446 set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
447 local->scan_req = req; 350 local->scan_req = req;
448 local->scan_sdata = sdata; 351 local->scan_sdata = sdata;
449 return 0; 352 return 0;
@@ -468,6 +371,14 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
468 local->hw_scan_req->ie = ies; 371 local->hw_scan_req->ie = ies;
469 372
470 local->hw_scan_band = 0; 373 local->hw_scan_band = 0;
374
375 /*
376 * After allocating local->hw_scan_req, we must
377 * go through until ieee80211_prep_hw_scan(), so
378 * anything that might be changed here and leave
379 * this function early must not go after this
380 * allocation.
381 */
471 } 382 }
472 383
473 local->scan_req = req; 384 local->scan_req = req;
@@ -477,15 +388,16 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
477 __set_bit(SCAN_HW_SCANNING, &local->scanning); 388 __set_bit(SCAN_HW_SCANNING, &local->scanning);
478 else 389 else
479 __set_bit(SCAN_SW_SCANNING, &local->scanning); 390 __set_bit(SCAN_SW_SCANNING, &local->scanning);
391
480 /* 392 /*
481 * Kicking off the scan need not be protected, 393 * Kicking off the scan need not be protected,
482 * only the scan variable stuff, since now 394 * only the scan variable stuff, since now
483 * local->scan_req is assigned and other callers 395 * local->scan_req is assigned and other callers
484 * will abort their scan attempts. 396 * will abort their scan attempts.
485 * 397 *
486 * This avoids getting a scan_mtx -> iflist_mtx 398 * This avoids too many locking dependencies
487 * dependency, so that the scan completed calls 399 * so that the scan completed calls have more
488 * have more locking freedom. 400 * locking freedom.
489 */ 401 */
490 402
491 ieee80211_recalc_idle(local); 403 ieee80211_recalc_idle(local);
@@ -528,7 +440,7 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
528 /* check if at least one STA interface is associated */ 440 /* check if at least one STA interface is associated */
529 mutex_lock(&local->iflist_mtx); 441 mutex_lock(&local->iflist_mtx);
530 list_for_each_entry(sdata, &local->interfaces, list) { 442 list_for_each_entry(sdata, &local->interfaces, list) {
531 if (!netif_running(sdata->dev)) 443 if (!ieee80211_sdata_running(sdata))
532 continue; 444 continue;
533 445
534 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 446 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
@@ -566,56 +478,35 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
566static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local, 478static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
567 unsigned long *next_delay) 479 unsigned long *next_delay)
568{ 480{
569 struct ieee80211_sub_if_data *sdata; 481 ieee80211_offchannel_stop_station(local);
482
483 __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
570 484
571 /* 485 /*
572 * notify the AP about us leaving the channel and stop all STA interfaces 486 * What if the nullfunc frames didn't arrive?
573 */ 487 */
574 mutex_lock(&local->iflist_mtx); 488 drv_flush(local, false);
575 list_for_each_entry(sdata, &local->interfaces, list) { 489 if (local->ops->flush)
576 if (!netif_running(sdata->dev)) 490 *next_delay = 0;
577 continue; 491 else
578 492 *next_delay = HZ / 10;
579 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
580 netif_tx_stop_all_queues(sdata->dev);
581 if (sdata->u.mgd.associated)
582 ieee80211_scan_ps_enable(sdata);
583 }
584 }
585 mutex_unlock(&local->iflist_mtx);
586
587 __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
588 493
589 /* advance to the next channel to be scanned */ 494 /* advance to the next channel to be scanned */
590 *next_delay = HZ / 10;
591 local->next_scan_state = SCAN_SET_CHANNEL; 495 local->next_scan_state = SCAN_SET_CHANNEL;
592} 496}
593 497
594static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *local, 498static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *local,
595 unsigned long *next_delay) 499 unsigned long *next_delay)
596{ 500{
597 struct ieee80211_sub_if_data *sdata = local->scan_sdata;
598
599 /* switch back to the operating channel */ 501 /* switch back to the operating channel */
600 local->scan_channel = NULL; 502 local->scan_channel = NULL;
601 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 503 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
602 504
603 /* 505 /*
604 * notify the AP about us being back and restart all STA interfaces 506 * Only re-enable station mode interface now; beaconing will be
507 * re-enabled once the full scan has been completed.
605 */ 508 */
606 mutex_lock(&local->iflist_mtx); 509 ieee80211_offchannel_return(local, false);
607 list_for_each_entry(sdata, &local->interfaces, list) {
608 if (!netif_running(sdata->dev))
609 continue;
610
611 /* Tell AP we're back */
612 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
613 if (sdata->u.mgd.associated)
614 ieee80211_scan_ps_disable(sdata);
615 netif_tx_wake_all_queues(sdata->dev);
616 }
617 }
618 mutex_unlock(&local->iflist_mtx);
619 510
620 __clear_bit(SCAN_OFF_CHANNEL, &local->scanning); 511 __clear_bit(SCAN_OFF_CHANNEL, &local->scanning);
621 512
@@ -729,7 +620,7 @@ void ieee80211_scan_work(struct work_struct *work)
729 /* 620 /*
730 * Avoid re-scheduling when the sdata is going away. 621 * Avoid re-scheduling when the sdata is going away.
731 */ 622 */
732 if (!netif_running(sdata->dev)) { 623 if (!ieee80211_sdata_running(sdata)) {
733 ieee80211_scan_completed(&local->hw, true); 624 ieee80211_scan_completed(&local->hw, true);
734 return; 625 return;
735 } 626 }
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index aa743a895cf..7733f66ee2c 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -35,7 +35,7 @@ static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_da
35 35
36 if (!skb) { 36 if (!skb) {
37 printk(KERN_ERR "%s: failed to allocate buffer for " 37 printk(KERN_ERR "%s: failed to allocate buffer for "
38 "measurement report frame\n", sdata->dev->name); 38 "measurement report frame\n", sdata->name);
39 return; 39 return;
40 } 40 }
41 41
@@ -43,7 +43,7 @@ static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_da
43 msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24); 43 msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
44 memset(msr_report, 0, 24); 44 memset(msr_report, 0, 24);
45 memcpy(msr_report->da, da, ETH_ALEN); 45 memcpy(msr_report->da, da, ETH_ALEN);
46 memcpy(msr_report->sa, sdata->dev->dev_addr, ETH_ALEN); 46 memcpy(msr_report->sa, sdata->vif.addr, ETH_ALEN);
47 memcpy(msr_report->bssid, bssid, ETH_ALEN); 47 memcpy(msr_report->bssid, bssid, ETH_ALEN);
48 msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 48 msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
49 IEEE80211_STYPE_ACTION); 49 IEEE80211_STYPE_ACTION);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 71f370dd24b..211c475f73c 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -32,49 +32,33 @@
32 * for faster lookup and a list for iteration. They are managed using 32 * for faster lookup and a list for iteration. They are managed using
33 * RCU, i.e. access to the list and hash table is protected by RCU. 33 * RCU, i.e. access to the list and hash table is protected by RCU.
34 * 34 *
35 * Upon allocating a STA info structure with sta_info_alloc(), the caller owns 35 * Upon allocating a STA info structure with sta_info_alloc(), the caller
36 * that structure. It must then either destroy it using sta_info_destroy() 36 * owns that structure. It must then insert it into the hash table using
37 * (which is pretty useless) or insert it into the hash table using 37 * either sta_info_insert() or sta_info_insert_rcu(); only in the latter
38 * sta_info_insert() which demotes the reference from ownership to a regular 38 * case (which acquires an rcu read section but must not be called from
39 * RCU-protected reference; if the function is called without protection by an 39 * within one) will the pointer still be valid after the call. Note that
40 * RCU critical section the reference is instantly invalidated. Note that the 40 * the caller may not do much with the STA info before inserting it, in
41 * caller may not do much with the STA info before inserting it, in particular, 41 * particular, it may not start any mesh peer link management or add
42 * it may not start any mesh peer link management or add encryption keys. 42 * encryption keys.
43 * 43 *
44 * When the insertion fails (sta_info_insert()) returns non-zero), the 44 * When the insertion fails (sta_info_insert()) returns non-zero), the
45 * structure will have been freed by sta_info_insert()! 45 * structure will have been freed by sta_info_insert()!
46 * 46 *
47 * sta entries are added by mac80211 when you establish a link with a 47 * Station entries are added by mac80211 when you establish a link with a
48 * peer. This means different things for the different type of interfaces 48 * peer. This means different things for the different type of interfaces
49 * we support. For a regular station this mean we add the AP sta when we 49 * we support. For a regular station this mean we add the AP sta when we
50 * receive an assocation response from the AP. For IBSS this occurs when 50 * receive an assocation response from the AP. For IBSS this occurs when
51 * we receive a probe response or a beacon from target IBSS network. For 51 * get to know about a peer on the same IBSS. For WDS we add the sta for
52 * WDS we add the sta for the peer imediately upon device open. When using 52 * the peer imediately upon device open. When using AP mode we add stations
53 * AP mode we add stations for each respective station upon request from 53 * for each respective station upon request from userspace through nl80211.
54 * userspace through nl80211.
55 * 54 *
56 * Because there are debugfs entries for each station, and adding those 55 * In order to remove a STA info structure, various sta_info_destroy_*()
57 * must be able to sleep, it is also possible to "pin" a station entry, 56 * calls are available.
58 * that means it can be removed from the hash table but not be freed.
59 * See the comment in __sta_info_unlink() for more information, this is
60 * an internal capability only.
61 * 57 *
62 * In order to remove a STA info structure, the caller needs to first 58 * There is no concept of ownership on a STA entry, each structure is
63 * unlink it (sta_info_unlink()) from the list and hash tables and 59 * owned by the global hash table/list until it is removed. All users of
64 * then destroy it; sta_info_destroy() will wait for an RCU grace period 60 * the structure need to be RCU protected so that the structure won't be
65 * to elapse before actually freeing it. Due to the pinning and the 61 * freed before they are done using it.
66 * possibility of multiple callers trying to remove the same STA info at
67 * the same time, sta_info_unlink() can clear the STA info pointer it is
68 * passed to indicate that the STA info is owned by somebody else now.
69 *
70 * If sta_info_unlink() did not clear the pointer then the caller owns
71 * the STA info structure now and is responsible of destroying it with
72 * a call to sta_info_destroy().
73 *
74 * In all other cases, there is no concept of ownership on a STA entry,
75 * each structure is owned by the global hash table/list until it is
76 * removed. All users of the structure need to be RCU protected so that
77 * the structure won't be freed before they are done using it.
78 */ 62 */
79 63
80/* Caller must hold local->sta_lock */ 64/* Caller must hold local->sta_lock */
@@ -103,13 +87,37 @@ static int sta_info_hash_del(struct ieee80211_local *local,
103} 87}
104 88
105/* protected by RCU */ 89/* protected by RCU */
106struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr) 90struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
91 const u8 *addr)
107{ 92{
93 struct ieee80211_local *local = sdata->local;
108 struct sta_info *sta; 94 struct sta_info *sta;
109 95
110 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); 96 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
111 while (sta) { 97 while (sta) {
112 if (memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) 98 if (sta->sdata == sdata &&
99 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
100 break;
101 sta = rcu_dereference(sta->hnext);
102 }
103 return sta;
104}
105
106/*
107 * Get sta info either from the specified interface
108 * or from one of its vlans
109 */
110struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
111 const u8 *addr)
112{
113 struct ieee80211_local *local = sdata->local;
114 struct sta_info *sta;
115
116 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
117 while (sta) {
118 if ((sta->sdata == sdata ||
119 sta->sdata->bss == sdata->bss) &&
120 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
113 break; 121 break;
114 sta = rcu_dereference(sta->hnext); 122 sta = rcu_dereference(sta->hnext);
115 } 123 }
@@ -161,101 +169,6 @@ static void __sta_info_free(struct ieee80211_local *local,
161 kfree(sta); 169 kfree(sta);
162} 170}
163 171
164void sta_info_destroy(struct sta_info *sta)
165{
166 struct ieee80211_local *local;
167 struct sk_buff *skb;
168 int i;
169
170 might_sleep();
171
172 if (!sta)
173 return;
174
175 local = sta->local;
176
177 cancel_work_sync(&sta->drv_unblock_wk);
178
179 rate_control_remove_sta_debugfs(sta);
180 ieee80211_sta_debugfs_remove(sta);
181
182#ifdef CONFIG_MAC80211_MESH
183 if (ieee80211_vif_is_mesh(&sta->sdata->vif))
184 mesh_plink_deactivate(sta);
185#endif
186
187 /*
188 * We have only unlinked the key, and actually destroying it
189 * may mean it is removed from hardware which requires that
190 * the key->sta pointer is still valid, so flush the key todo
191 * list here.
192 *
193 * ieee80211_key_todo() will synchronize_rcu() so after this
194 * nothing can reference this sta struct any more.
195 */
196 ieee80211_key_todo();
197
198#ifdef CONFIG_MAC80211_MESH
199 if (ieee80211_vif_is_mesh(&sta->sdata->vif))
200 del_timer_sync(&sta->plink_timer);
201#endif
202
203 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
204 local->total_ps_buffered--;
205 dev_kfree_skb_any(skb);
206 }
207
208 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL)
209 dev_kfree_skb_any(skb);
210
211 for (i = 0; i < STA_TID_NUM; i++) {
212 struct tid_ampdu_rx *tid_rx;
213 struct tid_ampdu_tx *tid_tx;
214
215 spin_lock_bh(&sta->lock);
216 tid_rx = sta->ampdu_mlme.tid_rx[i];
217 /* Make sure timer won't free the tid_rx struct, see below */
218 if (tid_rx)
219 tid_rx->shutdown = true;
220
221 spin_unlock_bh(&sta->lock);
222
223 /*
224 * Outside spinlock - shutdown is true now so that the timer
225 * won't free tid_rx, we have to do that now. Can't let the
226 * timer do it because we have to sync the timer outside the
227 * lock that it takes itself.
228 */
229 if (tid_rx) {
230 del_timer_sync(&tid_rx->session_timer);
231 kfree(tid_rx);
232 }
233
234 /*
235 * No need to do such complications for TX agg sessions, the
236 * path leading to freeing the tid_tx struct goes via a call
237 * from the driver, and thus needs to look up the sta struct
238 * again, which cannot be found when we get here. Hence, we
239 * just need to delete the timer and free the aggregation
240 * info; we won't be telling the peer about it then but that
241 * doesn't matter if we're not talking to it again anyway.
242 */
243 tid_tx = sta->ampdu_mlme.tid_tx[i];
244 if (tid_tx) {
245 del_timer_sync(&tid_tx->addba_resp_timer);
246 /*
247 * STA removed while aggregation session being
248 * started? Bit odd, but purge frames anyway.
249 */
250 skb_queue_purge(&tid_tx->pending);
251 kfree(tid_tx);
252 }
253 }
254
255 __sta_info_free(local, sta);
256}
257
258
259/* Caller must hold local->sta_lock */ 172/* Caller must hold local->sta_lock */
260static void sta_info_hash_add(struct ieee80211_local *local, 173static void sta_info_hash_add(struct ieee80211_local *local,
261 struct sta_info *sta) 174 struct sta_info *sta)
@@ -352,7 +265,93 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
352 return sta; 265 return sta;
353} 266}
354 267
355int sta_info_insert(struct sta_info *sta) 268static int sta_info_finish_insert(struct sta_info *sta, bool async)
269{
270 struct ieee80211_local *local = sta->local;
271 struct ieee80211_sub_if_data *sdata = sta->sdata;
272 struct station_info sinfo;
273 unsigned long flags;
274 int err = 0;
275
276 WARN_ON(!mutex_is_locked(&local->sta_mtx));
277
278 /* notify driver */
279 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
280 sdata = container_of(sdata->bss,
281 struct ieee80211_sub_if_data,
282 u.ap);
283 err = drv_sta_add(local, sdata, &sta->sta);
284 if (err) {
285 if (!async)
286 return err;
287 printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to driver (%d)"
288 " - keeping it anyway.\n",
289 sdata->name, sta->sta.addr, err);
290 } else {
291 sta->uploaded = true;
292#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
293 if (async)
294 printk(KERN_DEBUG "%s: Finished adding IBSS STA %pM\n",
295 wiphy_name(local->hw.wiphy), sta->sta.addr);
296#endif
297 }
298
299 sdata = sta->sdata;
300
301 if (!async) {
302 local->num_sta++;
303 local->sta_generation++;
304 smp_mb();
305
306 /* make the station visible */
307 spin_lock_irqsave(&local->sta_lock, flags);
308 sta_info_hash_add(local, sta);
309 spin_unlock_irqrestore(&local->sta_lock, flags);
310 }
311
312 list_add(&sta->list, &local->sta_list);
313
314 ieee80211_sta_debugfs_add(sta);
315 rate_control_add_sta_debugfs(sta);
316
317 sinfo.filled = 0;
318 sinfo.generation = local->sta_generation;
319 cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
320
321
322 return 0;
323}
324
325static void sta_info_finish_pending(struct ieee80211_local *local)
326{
327 struct sta_info *sta;
328 unsigned long flags;
329
330 spin_lock_irqsave(&local->sta_lock, flags);
331 while (!list_empty(&local->sta_pending_list)) {
332 sta = list_first_entry(&local->sta_pending_list,
333 struct sta_info, list);
334 list_del(&sta->list);
335 spin_unlock_irqrestore(&local->sta_lock, flags);
336
337 sta_info_finish_insert(sta, true);
338
339 spin_lock_irqsave(&local->sta_lock, flags);
340 }
341 spin_unlock_irqrestore(&local->sta_lock, flags);
342}
343
344static void sta_info_finish_work(struct work_struct *work)
345{
346 struct ieee80211_local *local =
347 container_of(work, struct ieee80211_local, sta_finish_work);
348
349 mutex_lock(&local->sta_mtx);
350 sta_info_finish_pending(local);
351 mutex_unlock(&local->sta_mtx);
352}
353
354int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
356{ 355{
357 struct ieee80211_local *local = sta->local; 356 struct ieee80211_local *local = sta->local;
358 struct ieee80211_sub_if_data *sdata = sta->sdata; 357 struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -364,38 +363,89 @@ int sta_info_insert(struct sta_info *sta)
364 * something inserts a STA (on one CPU) without holding the RTNL 363 * something inserts a STA (on one CPU) without holding the RTNL
365 * and another CPU turns off the net device. 364 * and another CPU turns off the net device.
366 */ 365 */
367 if (unlikely(!netif_running(sdata->dev))) { 366 if (unlikely(!ieee80211_sdata_running(sdata))) {
368 err = -ENETDOWN; 367 err = -ENETDOWN;
368 rcu_read_lock();
369 goto out_free; 369 goto out_free;
370 } 370 }
371 371
372 if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->dev->dev_addr) == 0 || 372 if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->vif.addr) == 0 ||
373 is_multicast_ether_addr(sta->sta.addr))) { 373 is_multicast_ether_addr(sta->sta.addr))) {
374 err = -EINVAL; 374 err = -EINVAL;
375 rcu_read_lock();
375 goto out_free; 376 goto out_free;
376 } 377 }
377 378
379 /*
380 * In ad-hoc mode, we sometimes need to insert stations
381 * from tasklet context from the RX path. To avoid races,
382 * always do so in that case -- see the comment below.
383 */
384 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
385 spin_lock_irqsave(&local->sta_lock, flags);
386 /* check if STA exists already */
387 if (sta_info_get_bss(sdata, sta->sta.addr)) {
388 spin_unlock_irqrestore(&local->sta_lock, flags);
389 rcu_read_lock();
390 err = -EEXIST;
391 goto out_free;
392 }
393
394 local->num_sta++;
395 local->sta_generation++;
396 smp_mb();
397 sta_info_hash_add(local, sta);
398
399 list_add_tail(&sta->list, &local->sta_pending_list);
400
401 rcu_read_lock();
402 spin_unlock_irqrestore(&local->sta_lock, flags);
403
404#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
405 printk(KERN_DEBUG "%s: Added IBSS STA %pM\n",
406 wiphy_name(local->hw.wiphy), sta->sta.addr);
407#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
408
409 ieee80211_queue_work(&local->hw, &local->sta_finish_work);
410
411 return 0;
412 }
413
414 /*
415 * On first glance, this will look racy, because the code
416 * below this point, which inserts a station with sleeping,
417 * unlocks the sta_lock between checking existence in the
418 * hash table and inserting into it.
419 *
420 * However, it is not racy against itself because it keeps
421 * the mutex locked. It still seems to race against the
422 * above code that atomically inserts the station... That,
423 * however, is not true because the above code can only
424 * be invoked for IBSS interfaces, and the below code will
425 * not be -- and the two do not race against each other as
426 * the hash table also keys off the interface.
427 */
428
429 might_sleep();
430
431 mutex_lock(&local->sta_mtx);
432
378 spin_lock_irqsave(&local->sta_lock, flags); 433 spin_lock_irqsave(&local->sta_lock, flags);
379 /* check if STA exists already */ 434 /* check if STA exists already */
380 if (sta_info_get(local, sta->sta.addr)) { 435 if (sta_info_get_bss(sdata, sta->sta.addr)) {
381 spin_unlock_irqrestore(&local->sta_lock, flags); 436 spin_unlock_irqrestore(&local->sta_lock, flags);
437 rcu_read_lock();
382 err = -EEXIST; 438 err = -EEXIST;
383 goto out_free; 439 goto out_free;
384 } 440 }
385 list_add(&sta->list, &local->sta_list);
386 local->sta_generation++;
387 local->num_sta++;
388 sta_info_hash_add(local, sta);
389 441
390 /* notify driver */ 442 spin_unlock_irqrestore(&local->sta_lock, flags);
391 if (local->ops->sta_notify) {
392 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
393 sdata = container_of(sdata->bss,
394 struct ieee80211_sub_if_data,
395 u.ap);
396 443
397 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_ADD, &sta->sta); 444 err = sta_info_finish_insert(sta, false);
398 sdata = sta->sdata; 445 if (err) {
446 mutex_unlock(&local->sta_mtx);
447 rcu_read_lock();
448 goto out_free;
399 } 449 }
400 450
401#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 451#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -403,18 +453,9 @@ int sta_info_insert(struct sta_info *sta)
403 wiphy_name(local->hw.wiphy), sta->sta.addr); 453 wiphy_name(local->hw.wiphy), sta->sta.addr);
404#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 454#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
405 455
406 spin_unlock_irqrestore(&local->sta_lock, flags); 456 /* move reference to rcu-protected */
407 457 rcu_read_lock();
408#ifdef CONFIG_MAC80211_DEBUGFS 458 mutex_unlock(&local->sta_mtx);
409 /*
410 * Debugfs entry adding might sleep, so schedule process
411 * context task for adding entry for STAs that do not yet
412 * have one.
413 * NOTE: due to auto-freeing semantics this may only be done
414 * if the insertion is successful!
415 */
416 schedule_work(&local->sta_debugfs_add);
417#endif
418 459
419 if (ieee80211_vif_is_mesh(&sdata->vif)) 460 if (ieee80211_vif_is_mesh(&sdata->vif))
420 mesh_accept_plinks_update(sdata); 461 mesh_accept_plinks_update(sdata);
@@ -426,6 +467,15 @@ int sta_info_insert(struct sta_info *sta)
426 return err; 467 return err;
427} 468}
428 469
470int sta_info_insert(struct sta_info *sta)
471{
472 int err = sta_info_insert_rcu(sta);
473
474 rcu_read_unlock();
475
476 return err;
477}
478
429static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid) 479static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid)
430{ 480{
431 /* 481 /*
@@ -494,108 +544,6 @@ void sta_info_clear_tim_bit(struct sta_info *sta)
494 spin_unlock_irqrestore(&sta->local->sta_lock, flags); 544 spin_unlock_irqrestore(&sta->local->sta_lock, flags);
495} 545}
496 546
497static void __sta_info_unlink(struct sta_info **sta)
498{
499 struct ieee80211_local *local = (*sta)->local;
500 struct ieee80211_sub_if_data *sdata = (*sta)->sdata;
501 /*
502 * pull caller's reference if we're already gone.
503 */
504 if (sta_info_hash_del(local, *sta)) {
505 *sta = NULL;
506 return;
507 }
508
509 if ((*sta)->key) {
510 ieee80211_key_free((*sta)->key);
511 WARN_ON((*sta)->key);
512 }
513
514 list_del(&(*sta)->list);
515 (*sta)->dead = true;
516
517 if (test_and_clear_sta_flags(*sta,
518 WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) {
519 BUG_ON(!sdata->bss);
520
521 atomic_dec(&sdata->bss->num_sta_ps);
522 __sta_info_clear_tim_bit(sdata->bss, *sta);
523 }
524
525 local->num_sta--;
526 local->sta_generation++;
527
528 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
529 rcu_assign_pointer(sdata->u.vlan.sta, NULL);
530
531 if (local->ops->sta_notify) {
532 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
533 sdata = container_of(sdata->bss,
534 struct ieee80211_sub_if_data,
535 u.ap);
536
537 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_REMOVE,
538 &(*sta)->sta);
539 sdata = (*sta)->sdata;
540 }
541
542 if (ieee80211_vif_is_mesh(&sdata->vif)) {
543 mesh_accept_plinks_update(sdata);
544#ifdef CONFIG_MAC80211_MESH
545 del_timer(&(*sta)->plink_timer);
546#endif
547 }
548
549#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
550 printk(KERN_DEBUG "%s: Removed STA %pM\n",
551 wiphy_name(local->hw.wiphy), (*sta)->sta.addr);
552#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
553
554 /*
555 * Finally, pull caller's reference if the STA is pinned by the
556 * task that is adding the debugfs entries. In that case, we
557 * leave the STA "to be freed".
558 *
559 * The rules are not trivial, but not too complex either:
560 * (1) pin_status is only modified under the sta_lock
561 * (2) STAs may only be pinned under the RTNL so that
562 * sta_info_flush() is guaranteed to actually destroy
563 * all STAs that are active for a given interface, this
564 * is required for correctness because otherwise we
565 * could notify a driver that an interface is going
566 * away and only after that (!) notify it about a STA
567 * on that interface going away.
568 * (3) sta_info_debugfs_add_work() will set the status
569 * to PINNED when it found an item that needs a new
570 * debugfs directory created. In that case, that item
571 * must not be freed although all *RCU* users are done
572 * with it. Hence, we tell the caller of _unlink()
573 * that the item is already gone (as can happen when
574 * two tasks try to unlink/destroy at the same time)
575 * (4) We set the pin_status to DESTROY here when we
576 * find such an item.
577 * (5) sta_info_debugfs_add_work() will reset the pin_status
578 * from PINNED to NORMAL when it is done with the item,
579 * but will check for DESTROY before resetting it in
580 * which case it will free the item.
581 */
582 if ((*sta)->pin_status == STA_INFO_PIN_STAT_PINNED) {
583 (*sta)->pin_status = STA_INFO_PIN_STAT_DESTROY;
584 *sta = NULL;
585 return;
586 }
587}
588
589void sta_info_unlink(struct sta_info **sta)
590{
591 struct ieee80211_local *local = (*sta)->local;
592 unsigned long flags;
593
594 spin_lock_irqsave(&local->sta_lock, flags);
595 __sta_info_unlink(sta);
596 spin_unlock_irqrestore(&local->sta_lock, flags);
597}
598
599static int sta_info_buffer_expired(struct sta_info *sta, 547static int sta_info_buffer_expired(struct sta_info *sta,
600 struct sk_buff *skb) 548 struct sk_buff *skb)
601{ 549{
@@ -652,109 +600,209 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
652 } 600 }
653} 601}
654 602
655 603static int __must_check __sta_info_destroy(struct sta_info *sta)
656static void sta_info_cleanup(unsigned long data)
657{ 604{
658 struct ieee80211_local *local = (struct ieee80211_local *) data; 605 struct ieee80211_local *local;
659 struct sta_info *sta; 606 struct ieee80211_sub_if_data *sdata;
607 struct sk_buff *skb;
608 unsigned long flags;
609 int ret, i;
660 610
661 rcu_read_lock(); 611 might_sleep();
662 list_for_each_entry_rcu(sta, &local->sta_list, list)
663 sta_info_cleanup_expire_buffered(local, sta);
664 rcu_read_unlock();
665 612
666 if (local->quiescing) 613 if (!sta)
667 return; 614 return -ENOENT;
668 615
669 local->sta_cleanup.expires = 616 local = sta->local;
670 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); 617 sdata = sta->sdata;
671 add_timer(&local->sta_cleanup);
672}
673 618
674#ifdef CONFIG_MAC80211_DEBUGFS 619 spin_lock_irqsave(&local->sta_lock, flags);
675/* 620 ret = sta_info_hash_del(local, sta);
676 * See comment in __sta_info_unlink, 621 /* this might still be the pending list ... which is fine */
677 * caller must hold local->sta_lock. 622 if (!ret)
678 */ 623 list_del(&sta->list);
679static void __sta_info_pin(struct sta_info *sta) 624 spin_unlock_irqrestore(&local->sta_lock, flags);
680{ 625 if (ret)
681 WARN_ON(sta->pin_status != STA_INFO_PIN_STAT_NORMAL); 626 return ret;
682 sta->pin_status = STA_INFO_PIN_STAT_PINNED; 627
628 if (sta->key) {
629 ieee80211_key_free(sta->key);
630 /*
631 * We have only unlinked the key, and actually destroying it
632 * may mean it is removed from hardware which requires that
633 * the key->sta pointer is still valid, so flush the key todo
634 * list here.
635 *
636 * ieee80211_key_todo() will synchronize_rcu() so after this
637 * nothing can reference this sta struct any more.
638 */
639 ieee80211_key_todo();
640
641 WARN_ON(sta->key);
642 }
643
644 sta->dead = true;
645
646 if (test_and_clear_sta_flags(sta,
647 WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) {
648 BUG_ON(!sdata->bss);
649
650 atomic_dec(&sdata->bss->num_sta_ps);
651 __sta_info_clear_tim_bit(sdata->bss, sta);
652 }
653
654 local->num_sta--;
655 local->sta_generation++;
656
657 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
658 rcu_assign_pointer(sdata->u.vlan.sta, NULL);
659
660 if (sta->uploaded) {
661 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
662 sdata = container_of(sdata->bss,
663 struct ieee80211_sub_if_data,
664 u.ap);
665 drv_sta_remove(local, sdata, &sta->sta);
666 sdata = sta->sdata;
667 }
668
669#ifdef CONFIG_MAC80211_MESH
670 if (ieee80211_vif_is_mesh(&sdata->vif)) {
671 mesh_accept_plinks_update(sdata);
672 del_timer(&sta->plink_timer);
673 }
674#endif
675
676#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
677 printk(KERN_DEBUG "%s: Removed STA %pM\n",
678 wiphy_name(local->hw.wiphy), sta->sta.addr);
679#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
680 cancel_work_sync(&sta->drv_unblock_wk);
681
682 rate_control_remove_sta_debugfs(sta);
683 ieee80211_sta_debugfs_remove(sta);
684
685#ifdef CONFIG_MAC80211_MESH
686 if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
687 mesh_plink_deactivate(sta);
688 del_timer_sync(&sta->plink_timer);
689 }
690#endif
691
692 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
693 local->total_ps_buffered--;
694 dev_kfree_skb_any(skb);
695 }
696
697 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL)
698 dev_kfree_skb_any(skb);
699
700 for (i = 0; i < STA_TID_NUM; i++) {
701 struct tid_ampdu_rx *tid_rx;
702 struct tid_ampdu_tx *tid_tx;
703
704 spin_lock_bh(&sta->lock);
705 tid_rx = sta->ampdu_mlme.tid_rx[i];
706 /* Make sure timer won't free the tid_rx struct, see below */
707 if (tid_rx)
708 tid_rx->shutdown = true;
709
710 spin_unlock_bh(&sta->lock);
711
712 /*
713 * Outside spinlock - shutdown is true now so that the timer
714 * won't free tid_rx, we have to do that now. Can't let the
715 * timer do it because we have to sync the timer outside the
716 * lock that it takes itself.
717 */
718 if (tid_rx) {
719 del_timer_sync(&tid_rx->session_timer);
720 kfree(tid_rx);
721 }
722
723 /*
724 * No need to do such complications for TX agg sessions, the
725 * path leading to freeing the tid_tx struct goes via a call
726 * from the driver, and thus needs to look up the sta struct
727 * again, which cannot be found when we get here. Hence, we
728 * just need to delete the timer and free the aggregation
729 * info; we won't be telling the peer about it then but that
730 * doesn't matter if we're not talking to it again anyway.
731 */
732 tid_tx = sta->ampdu_mlme.tid_tx[i];
733 if (tid_tx) {
734 del_timer_sync(&tid_tx->addba_resp_timer);
735 /*
736 * STA removed while aggregation session being
737 * started? Bit odd, but purge frames anyway.
738 */
739 skb_queue_purge(&tid_tx->pending);
740 kfree(tid_tx);
741 }
742 }
743
744 __sta_info_free(local, sta);
745
746 return 0;
683} 747}
684 748
685/* 749int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr)
686 * See comment in __sta_info_unlink, returns sta if it
687 * needs to be destroyed.
688 */
689static struct sta_info *__sta_info_unpin(struct sta_info *sta)
690{ 750{
691 struct sta_info *ret = NULL; 751 struct sta_info *sta;
692 unsigned long flags; 752 int ret;
693 753
694 spin_lock_irqsave(&sta->local->sta_lock, flags); 754 mutex_lock(&sdata->local->sta_mtx);
695 WARN_ON(sta->pin_status != STA_INFO_PIN_STAT_DESTROY && 755 sta = sta_info_get(sdata, addr);
696 sta->pin_status != STA_INFO_PIN_STAT_PINNED); 756 ret = __sta_info_destroy(sta);
697 if (sta->pin_status == STA_INFO_PIN_STAT_DESTROY) 757 mutex_unlock(&sdata->local->sta_mtx);
698 ret = sta;
699 sta->pin_status = STA_INFO_PIN_STAT_NORMAL;
700 spin_unlock_irqrestore(&sta->local->sta_lock, flags);
701 758
702 return ret; 759 return ret;
703} 760}
704 761
705static void sta_info_debugfs_add_work(struct work_struct *work) 762int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
763 const u8 *addr)
706{ 764{
707 struct ieee80211_local *local = 765 struct sta_info *sta;
708 container_of(work, struct ieee80211_local, sta_debugfs_add); 766 int ret;
709 struct sta_info *sta, *tmp;
710 unsigned long flags;
711 767
712 /* We need to keep the RTNL across the whole pinned status. */ 768 mutex_lock(&sdata->local->sta_mtx);
713 rtnl_lock(); 769 sta = sta_info_get_bss(sdata, addr);
714 while (1) { 770 ret = __sta_info_destroy(sta);
715 sta = NULL; 771 mutex_unlock(&sdata->local->sta_mtx);
716 772
717 spin_lock_irqsave(&local->sta_lock, flags); 773 return ret;
718 list_for_each_entry(tmp, &local->sta_list, list) { 774}
719 /*
720 * debugfs.add_has_run will be set by
721 * ieee80211_sta_debugfs_add regardless
722 * of what else it does.
723 */
724 if (!tmp->debugfs.add_has_run) {
725 sta = tmp;
726 __sta_info_pin(sta);
727 break;
728 }
729 }
730 spin_unlock_irqrestore(&local->sta_lock, flags);
731 775
732 if (!sta) 776static void sta_info_cleanup(unsigned long data)
733 break; 777{
778 struct ieee80211_local *local = (struct ieee80211_local *) data;
779 struct sta_info *sta;
734 780
735 ieee80211_sta_debugfs_add(sta); 781 rcu_read_lock();
736 rate_control_add_sta_debugfs(sta); 782 list_for_each_entry_rcu(sta, &local->sta_list, list)
783 sta_info_cleanup_expire_buffered(local, sta);
784 rcu_read_unlock();
737 785
738 sta = __sta_info_unpin(sta); 786 if (local->quiescing)
739 sta_info_destroy(sta); 787 return;
740 } 788
741 rtnl_unlock(); 789 local->sta_cleanup.expires =
790 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
791 add_timer(&local->sta_cleanup);
742} 792}
743#endif
744 793
745void sta_info_init(struct ieee80211_local *local) 794void sta_info_init(struct ieee80211_local *local)
746{ 795{
747 spin_lock_init(&local->sta_lock); 796 spin_lock_init(&local->sta_lock);
797 mutex_init(&local->sta_mtx);
748 INIT_LIST_HEAD(&local->sta_list); 798 INIT_LIST_HEAD(&local->sta_list);
799 INIT_LIST_HEAD(&local->sta_pending_list);
800 INIT_WORK(&local->sta_finish_work, sta_info_finish_work);
749 801
750 setup_timer(&local->sta_cleanup, sta_info_cleanup, 802 setup_timer(&local->sta_cleanup, sta_info_cleanup,
751 (unsigned long)local); 803 (unsigned long)local);
752 local->sta_cleanup.expires = 804 local->sta_cleanup.expires =
753 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); 805 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
754
755#ifdef CONFIG_MAC80211_DEBUGFS
756 INIT_WORK(&local->sta_debugfs_add, sta_info_debugfs_add_work);
757#endif
758} 806}
759 807
760int sta_info_start(struct ieee80211_local *local) 808int sta_info_start(struct ieee80211_local *local)
@@ -766,16 +814,6 @@ int sta_info_start(struct ieee80211_local *local)
766void sta_info_stop(struct ieee80211_local *local) 814void sta_info_stop(struct ieee80211_local *local)
767{ 815{
768 del_timer(&local->sta_cleanup); 816 del_timer(&local->sta_cleanup);
769#ifdef CONFIG_MAC80211_DEBUGFS
770 /*
771 * Make sure the debugfs adding work isn't pending after this
772 * because we're about to be destroyed. It doesn't matter
773 * whether it ran or not since we're going to flush all STAs
774 * anyway.
775 */
776 cancel_work_sync(&local->sta_debugfs_add);
777#endif
778
779 sta_info_flush(local, NULL); 817 sta_info_flush(local, NULL);
780} 818}
781 819
@@ -791,26 +829,19 @@ int sta_info_flush(struct ieee80211_local *local,
791 struct ieee80211_sub_if_data *sdata) 829 struct ieee80211_sub_if_data *sdata)
792{ 830{
793 struct sta_info *sta, *tmp; 831 struct sta_info *sta, *tmp;
794 LIST_HEAD(tmp_list);
795 int ret = 0; 832 int ret = 0;
796 unsigned long flags;
797 833
798 might_sleep(); 834 might_sleep();
799 835
800 spin_lock_irqsave(&local->sta_lock, flags); 836 mutex_lock(&local->sta_mtx);
837
838 sta_info_finish_pending(local);
839
801 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { 840 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
802 if (!sdata || sdata == sta->sdata) { 841 if (!sdata || sdata == sta->sdata)
803 __sta_info_unlink(&sta); 842 WARN_ON(__sta_info_destroy(sta));
804 if (sta) {
805 list_add_tail(&sta->list, &tmp_list);
806 ret++;
807 }
808 }
809 } 843 }
810 spin_unlock_irqrestore(&local->sta_lock, flags); 844 mutex_unlock(&local->sta_mtx);
811
812 list_for_each_entry_safe(sta, tmp, &tmp_list, list)
813 sta_info_destroy(sta);
814 845
815 return ret; 846 return ret;
816} 847}
@@ -820,34 +851,28 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
820{ 851{
821 struct ieee80211_local *local = sdata->local; 852 struct ieee80211_local *local = sdata->local;
822 struct sta_info *sta, *tmp; 853 struct sta_info *sta, *tmp;
823 LIST_HEAD(tmp_list);
824 unsigned long flags;
825 854
826 spin_lock_irqsave(&local->sta_lock, flags); 855 mutex_lock(&local->sta_mtx);
827 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) 856 list_for_each_entry_safe(sta, tmp, &local->sta_list, list)
828 if (time_after(jiffies, sta->last_rx + exp_time)) { 857 if (time_after(jiffies, sta->last_rx + exp_time)) {
829#ifdef CONFIG_MAC80211_IBSS_DEBUG 858#ifdef CONFIG_MAC80211_IBSS_DEBUG
830 printk(KERN_DEBUG "%s: expiring inactive STA %pM\n", 859 printk(KERN_DEBUG "%s: expiring inactive STA %pM\n",
831 sdata->dev->name, sta->sta.addr); 860 sdata->name, sta->sta.addr);
832#endif 861#endif
833 __sta_info_unlink(&sta); 862 WARN_ON(__sta_info_destroy(sta));
834 if (sta)
835 list_add(&sta->list, &tmp_list);
836 } 863 }
837 spin_unlock_irqrestore(&local->sta_lock, flags); 864 mutex_unlock(&local->sta_mtx);
838
839 list_for_each_entry_safe(sta, tmp, &tmp_list, list)
840 sta_info_destroy(sta);
841} 865}
842 866
843struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw, 867struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw,
844 const u8 *addr) 868 const u8 *addr)
845{ 869{
846 struct sta_info *sta = sta_info_get(hw_to_local(hw), addr); 870 struct sta_info *sta, *nxt;
847 871
848 if (!sta) 872 /* Just return a random station ... first in list ... */
849 return NULL; 873 for_each_sta_info(hw_to_local(hw), addr, sta, nxt)
850 return &sta->sta; 874 return &sta->sta;
875 return NULL;
851} 876}
852EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw); 877EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw);
853 878
@@ -872,7 +897,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
872 struct ieee80211_local *local = sdata->local; 897 struct ieee80211_local *local = sdata->local;
873 int sent, buffered; 898 int sent, buffered;
874 899
875 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_AWAKE, &sta->sta); 900 drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
876 901
877 if (!skb_queue_empty(&sta->ps_tx_buf)) 902 if (!skb_queue_empty(&sta->ps_tx_buf))
878 sta_info_clear_tim_bit(sta); 903 sta_info_clear_tim_bit(sta);
@@ -885,7 +910,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
885 910
886#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 911#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
887 printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames " 912 printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames "
888 "since STA not sleeping anymore\n", sdata->dev->name, 913 "since STA not sleeping anymore\n", sdata->name,
889 sta->sta.addr, sta->sta.aid, sent - buffered, buffered); 914 sta->sta.addr, sta->sta.aid, sent - buffered, buffered);
890#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 915#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
891} 916}
@@ -944,7 +969,7 @@ void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta)
944 */ 969 */
945 printk(KERN_DEBUG "%s: STA %pM sent PS Poll even " 970 printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
946 "though there are no buffered frames for it\n", 971 "though there are no buffered frames for it\n",
947 sdata->dev->name, sta->sta.addr); 972 sdata->name, sta->sta.addr);
948#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 973#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
949 } 974 }
950} 975}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index b4810f6aa94..822d8452293 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -42,6 +42,9 @@
42 * be in the queues 42 * be in the queues
43 * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping 43 * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping
44 * station in power-save mode, reply when the driver unblocks. 44 * station in power-save mode, reply when the driver unblocks.
45 * @WLAN_STA_DISASSOC: Disassociation in progress.
46 * This is used to reject TX BA session requests when disassociation
47 * is in progress.
45 */ 48 */
46enum ieee80211_sta_info_flags { 49enum ieee80211_sta_info_flags {
47 WLAN_STA_AUTH = 1<<0, 50 WLAN_STA_AUTH = 1<<0,
@@ -57,6 +60,7 @@ enum ieee80211_sta_info_flags {
57 WLAN_STA_SUSPEND = 1<<11, 60 WLAN_STA_SUSPEND = 1<<11,
58 WLAN_STA_PS_DRIVER = 1<<12, 61 WLAN_STA_PS_DRIVER = 1<<12,
59 WLAN_STA_PSPOLL = 1<<13, 62 WLAN_STA_PSPOLL = 1<<13,
63 WLAN_STA_DISASSOC = 1<<14,
60}; 64};
61 65
62#define STA_TID_NUM 16 66#define STA_TID_NUM 16
@@ -162,11 +166,6 @@ struct sta_ampdu_mlme {
162}; 166};
163 167
164 168
165/* see __sta_info_unlink */
166#define STA_INFO_PIN_STAT_NORMAL 0
167#define STA_INFO_PIN_STAT_PINNED 1
168#define STA_INFO_PIN_STAT_DESTROY 2
169
170/** 169/**
171 * struct sta_info - STA information 170 * struct sta_info - STA information
172 * 171 *
@@ -187,7 +186,6 @@ struct sta_ampdu_mlme {
187 * @flaglock: spinlock for flags accesses 186 * @flaglock: spinlock for flags accesses
188 * @drv_unblock_wk: used for driver PS unblocking 187 * @drv_unblock_wk: used for driver PS unblocking
189 * @listen_interval: listen interval of this station, when we're acting as AP 188 * @listen_interval: listen interval of this station, when we're acting as AP
190 * @pin_status: used internally for pinning a STA struct into memory
191 * @flags: STA flags, see &enum ieee80211_sta_info_flags 189 * @flags: STA flags, see &enum ieee80211_sta_info_flags
192 * @ps_tx_buf: buffer of frames to transmit to this station 190 * @ps_tx_buf: buffer of frames to transmit to this station
193 * when it leaves power saving state 191 * when it leaves power saving state
@@ -226,6 +224,7 @@ struct sta_ampdu_mlme {
226 * @debugfs: debug filesystem info 224 * @debugfs: debug filesystem info
227 * @sta: station information we share with the driver 225 * @sta: station information we share with the driver
228 * @dead: set to true when sta is unlinked 226 * @dead: set to true when sta is unlinked
227 * @uploaded: set to true when sta is uploaded to the driver
229 */ 228 */
230struct sta_info { 229struct sta_info {
231 /* General information, mostly static */ 230 /* General information, mostly static */
@@ -245,11 +244,7 @@ struct sta_info {
245 244
246 bool dead; 245 bool dead;
247 246
248 /* 247 bool uploaded;
249 * for use by the internal lifetime management,
250 * see __sta_info_unlink
251 */
252 u8 pin_status;
253 248
254 /* 249 /*
255 * frequently updated, locked with own spinlock (flaglock), 250 * frequently updated, locked with own spinlock (flaglock),
@@ -403,9 +398,37 @@ static inline u32 get_sta_flags(struct sta_info *sta)
403#define STA_INFO_CLEANUP_INTERVAL (10 * HZ) 398#define STA_INFO_CLEANUP_INTERVAL (10 * HZ)
404 399
405/* 400/*
406 * Get a STA info, must have be under RCU read lock. 401 * Get a STA info, must be under RCU read lock.
407 */ 402 */
408struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr); 403struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
404 const u8 *addr);
405
406struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
407 const u8 *addr);
408
409static inline
410void for_each_sta_info_type_check(struct ieee80211_local *local,
411 const u8 *addr,
412 struct sta_info *sta,
413 struct sta_info *nxt)
414{
415}
416
417#define for_each_sta_info(local, _addr, sta, nxt) \
418 for ( /* initialise loop */ \
419 sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
420 nxt = sta ? rcu_dereference(sta->hnext) : NULL; \
421 /* typecheck */ \
422 for_each_sta_info_type_check(local, (_addr), sta, nxt), \
423 /* continue condition */ \
424 sta; \
425 /* advance loop */ \
426 sta = nxt, \
427 nxt = sta ? rcu_dereference(sta->hnext) : NULL \
428 ) \
429 /* compare address and run code only if it matches */ \
430 if (memcmp(sta->sta.addr, (_addr), ETH_ALEN) == 0)
431
409/* 432/*
410 * Get STA info by index, BROKEN! 433 * Get STA info by index, BROKEN!
411 */ 434 */
@@ -421,18 +444,19 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
421 * Insert STA info into hash table/list, returns zero or a 444 * Insert STA info into hash table/list, returns zero or a
422 * -EEXIST if (if the same MAC address is already present). 445 * -EEXIST if (if the same MAC address is already present).
423 * 446 *
424 * Calling this without RCU protection makes the caller 447 * Calling the non-rcu version makes the caller relinquish,
425 * relinquish its reference to @sta. 448 * the _rcu version calls read_lock_rcu() and must be called
449 * without it held.
426 */ 450 */
427int sta_info_insert(struct sta_info *sta); 451int sta_info_insert(struct sta_info *sta);
428/* 452int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU);
429 * Unlink a STA info from the hash table/list. 453int sta_info_insert_atomic(struct sta_info *sta);
430 * This can NULL the STA pointer if somebody else 454
431 * has already unlinked it. 455int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata,
432 */ 456 const u8 *addr);
433void sta_info_unlink(struct sta_info **sta); 457int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
458 const u8 *addr);
434 459
435void sta_info_destroy(struct sta_info *sta);
436void sta_info_set_tim_bit(struct sta_info *sta); 460void sta_info_set_tim_bit(struct sta_info *sta);
437void sta_info_clear_tim_bit(struct sta_info *sta); 461void sta_info_clear_tim_bit(struct sta_info *sta);
438 462
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index d78f36c64c7..ded98730c11 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -45,29 +45,19 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
45 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 45 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
46 46
47 /* 47 /*
48 * XXX: This is temporary! 48 * This skb 'survived' a round-trip through the driver, and
49 * 49 * hopefully the driver didn't mangle it too badly. However,
50 * The problem here is that when we get here, the driver will 50 * we can definitely not rely on the the control information
51 * quite likely have pretty much overwritten info->control by 51 * being correct. Clear it so we don't get junk there, and
52 * using info->driver_data or info->rate_driver_data. Thus, 52 * indicate that it needs new processing, but must not be
53 * when passing out the frame to the driver again, we would be 53 * modified/encrypted again.
54 * passing completely bogus data since the driver would then
55 * expect a properly filled info->control. In mac80211 itself
56 * the same problem occurs, since we need info->control.vif
57 * internally.
58 *
59 * To fix this, we should send the frame through TX processing
60 * again. However, it's not that simple, since the frame will
61 * have been software-encrypted (if applicable) already, and
62 * encrypting it again doesn't do much good. So to properly do
63 * that, we not only have to skip the actual 'raw' encryption
64 * (key selection etc. still has to be done!) but also the
65 * sequence number assignment since that impacts the crypto
66 * encapsulation, of course.
67 *
68 * Hence, for now, fix the bug by just dropping the frame.
69 */ 54 */
70 goto drop; 55 memset(&info->control, 0, sizeof(info->control));
56
57 info->control.jiffies = jiffies;
58 info->control.vif = &sta->sdata->vif;
59 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING |
60 IEEE80211_TX_INTFL_RETRANSMISSION;
71 61
72 sta->tx_filtered_count++; 62 sta->tx_filtered_count++;
73 63
@@ -122,7 +112,6 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
122 return; 112 return;
123 } 113 }
124 114
125 drop:
126#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 115#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
127 if (net_ratelimit()) 116 if (net_ratelimit())
128 printk(KERN_DEBUG "%s: dropped TX filtered frame, " 117 printk(KERN_DEBUG "%s: dropped TX filtered frame, "
@@ -134,6 +123,40 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
134 dev_kfree_skb(skb); 123 dev_kfree_skb(skb);
135} 124}
136 125
126static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
127{
128 struct ieee80211_mgmt *mgmt = (void *) skb->data;
129 struct ieee80211_local *local = sta->local;
130 struct ieee80211_sub_if_data *sdata = sta->sdata;
131
132 if (ieee80211_is_action(mgmt->frame_control) &&
133 sdata->vif.type == NL80211_IFTYPE_STATION &&
134 mgmt->u.action.category == WLAN_CATEGORY_HT &&
135 mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS) {
136 /*
137 * This update looks racy, but isn't -- if we come
138 * here we've definitely got a station that we're
139 * talking to, and on a managed interface that can
140 * only be the AP. And the only other place updating
141 * this variable is before we're associated.
142 */
143 switch (mgmt->u.action.u.ht_smps.smps_control) {
144 case WLAN_HT_SMPS_CONTROL_DYNAMIC:
145 sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_DYNAMIC;
146 break;
147 case WLAN_HT_SMPS_CONTROL_STATIC:
148 sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_STATIC;
149 break;
150 case WLAN_HT_SMPS_CONTROL_DISABLED:
151 default: /* shouldn't happen since we don't send that */
152 sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_OFF;
153 break;
154 }
155
156 ieee80211_queue_work(&local->hw, &local->recalc_smps);
157 }
158}
159
137void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) 160void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
138{ 161{
139 struct sk_buff *skb2; 162 struct sk_buff *skb2;
@@ -146,7 +169,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
146 struct ieee80211_tx_status_rtap_hdr *rthdr; 169 struct ieee80211_tx_status_rtap_hdr *rthdr;
147 struct ieee80211_sub_if_data *sdata; 170 struct ieee80211_sub_if_data *sdata;
148 struct net_device *prev_dev = NULL; 171 struct net_device *prev_dev = NULL;
149 struct sta_info *sta; 172 struct sta_info *sta, *tmp;
150 int retry_count = -1, i; 173 int retry_count = -1, i;
151 bool injected; 174 bool injected;
152 175
@@ -165,10 +188,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
165 rcu_read_lock(); 188 rcu_read_lock();
166 189
167 sband = local->hw.wiphy->bands[info->band]; 190 sband = local->hw.wiphy->bands[info->band];
191 fc = hdr->frame_control;
168 192
169 sta = sta_info_get(local, hdr->addr1); 193 for_each_sta_info(local, hdr->addr1, sta, tmp) {
194 /* skip wrong virtual interface */
195 if (memcmp(hdr->addr2, sta->sdata->vif.addr, ETH_ALEN))
196 continue;
170 197
171 if (sta) {
172 if (!(info->flags & IEEE80211_TX_STAT_ACK) && 198 if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
173 test_sta_flags(sta, WLAN_STA_PS_STA)) { 199 test_sta_flags(sta, WLAN_STA_PS_STA)) {
174 /* 200 /*
@@ -180,8 +206,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
180 return; 206 return;
181 } 207 }
182 208
183 fc = hdr->frame_control;
184
185 if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) && 209 if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
186 (ieee80211_is_data_qos(fc))) { 210 (ieee80211_is_data_qos(fc))) {
187 u16 tid, ssn; 211 u16 tid, ssn;
@@ -208,6 +232,10 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
208 rate_control_tx_status(local, sband, sta, skb); 232 rate_control_tx_status(local, sband, sta, skb);
209 if (ieee80211_vif_is_mesh(&sta->sdata->vif)) 233 if (ieee80211_vif_is_mesh(&sta->sdata->vif))
210 ieee80211s_update_metric(local, sta, skb); 234 ieee80211s_update_metric(local, sta, skb);
235
236 if (!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
237 (info->flags & IEEE80211_TX_STAT_ACK))
238 ieee80211_frame_acked(sta, skb);
211 } 239 }
212 240
213 rcu_read_unlock(); 241 rcu_read_unlock();
@@ -246,6 +274,20 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
246 local->dot11FailedCount++; 274 local->dot11FailedCount++;
247 } 275 }
248 276
277 if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
278 (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
279 !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
280 local->ps_sdata && !(local->scanning)) {
281 if (info->flags & IEEE80211_TX_STAT_ACK) {
282 local->ps_sdata->u.mgd.flags |=
283 IEEE80211_STA_NULLFUNC_ACKED;
284 ieee80211_queue_work(&local->hw,
285 &local->dynamic_ps_enable_work);
286 } else
287 mod_timer(&local->dynamic_ps_timer, jiffies +
288 msecs_to_jiffies(10));
289 }
290
249 /* this was a transmitted frame, but now we want to reuse it */ 291 /* this was a transmitted frame, but now we want to reuse it */
250 skb_orphan(skb); 292 skb_orphan(skb);
251 293
@@ -311,7 +353,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
311 rcu_read_lock(); 353 rcu_read_lock();
312 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 354 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
313 if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { 355 if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
314 if (!netif_running(sdata->dev)) 356 if (!ieee80211_sdata_running(sdata))
315 continue; 357 continue;
316 358
317 if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) && 359 if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 4921d724b6c..7ef491e9d66 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -100,7 +100,7 @@ static void tkip_mixing_phase1(const u8 *tk, struct tkip_ctx *ctx,
100 p1k[3] += tkipS(p1k[2] ^ get_unaligned_le16(tk + 12 + j)); 100 p1k[3] += tkipS(p1k[2] ^ get_unaligned_le16(tk + 12 + j));
101 p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i; 101 p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i;
102 } 102 }
103 ctx->initialized = 1; 103 ctx->state = TKIP_STATE_PHASE1_DONE;
104} 104}
105 105
106static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx, 106static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx,
@@ -183,7 +183,7 @@ void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf,
183 /* Update the p1k only when the iv16 in the packet wraps around, this 183 /* Update the p1k only when the iv16 in the packet wraps around, this
184 * might occur after the wrap around of iv16 in the key in case of 184 * might occur after the wrap around of iv16 in the key in case of
185 * fragmented packets. */ 185 * fragmented packets. */
186 if (iv16 == 0 || !ctx->initialized) 186 if (iv16 == 0 || ctx->state == TKIP_STATE_NOT_INIT)
187 tkip_mixing_phase1(tk, ctx, hdr->addr2, iv32); 187 tkip_mixing_phase1(tk, ctx, hdr->addr2, iv32);
188 188
189 if (type == IEEE80211_TKIP_P1_KEY) { 189 if (type == IEEE80211_TKIP_P1_KEY) {
@@ -195,11 +195,13 @@ void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf,
195} 195}
196EXPORT_SYMBOL(ieee80211_get_tkip_key); 196EXPORT_SYMBOL(ieee80211_get_tkip_key);
197 197
198/* Encrypt packet payload with TKIP using @key. @pos is a pointer to the 198/*
199 * Encrypt packet payload with TKIP using @key. @pos is a pointer to the
199 * beginning of the buffer containing payload. This payload must include 200 * beginning of the buffer containing payload. This payload must include
200 * headroom of eight octets for IV and Ext. IV and taildroom of four octets 201 * the IV/Ext.IV and space for (taildroom) four octets for ICV.
201 * for ICV. @payload_len is the length of payload (_not_ including extra 202 * @payload_len is the length of payload (_not_ including IV/ICV length).
202 * headroom and tailroom). @ta is the transmitter addresses. */ 203 * @ta is the transmitter addresses.
204 */
203void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, 205void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
204 struct ieee80211_key *key, 206 struct ieee80211_key *key,
205 u8 *pos, size_t payload_len, u8 *ta) 207 u8 *pos, size_t payload_len, u8 *ta)
@@ -209,12 +211,11 @@ void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
209 const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; 211 const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
210 212
211 /* Calculate per-packet key */ 213 /* Calculate per-packet key */
212 if (ctx->iv16 == 0 || !ctx->initialized) 214 if (ctx->iv16 == 0 || ctx->state == TKIP_STATE_NOT_INIT)
213 tkip_mixing_phase1(tk, ctx, ta, ctx->iv32); 215 tkip_mixing_phase1(tk, ctx, ta, ctx->iv32);
214 216
215 tkip_mixing_phase2(tk, ctx, ctx->iv16, rc4key); 217 tkip_mixing_phase2(tk, ctx, ctx->iv16, rc4key);
216 218
217 pos = ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16);
218 ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len); 219 ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len);
219} 220}
220 221
@@ -259,7 +260,7 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
259 if ((keyid >> 6) != key->conf.keyidx) 260 if ((keyid >> 6) != key->conf.keyidx)
260 return TKIP_DECRYPT_INVALID_KEYIDX; 261 return TKIP_DECRYPT_INVALID_KEYIDX;
261 262
262 if (key->u.tkip.rx[queue].initialized && 263 if (key->u.tkip.rx[queue].state != TKIP_STATE_NOT_INIT &&
263 (iv32 < key->u.tkip.rx[queue].iv32 || 264 (iv32 < key->u.tkip.rx[queue].iv32 ||
264 (iv32 == key->u.tkip.rx[queue].iv32 && 265 (iv32 == key->u.tkip.rx[queue].iv32 &&
265 iv16 <= key->u.tkip.rx[queue].iv16))) { 266 iv16 <= key->u.tkip.rx[queue].iv16))) {
@@ -275,11 +276,11 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
275 276
276 if (only_iv) { 277 if (only_iv) {
277 res = TKIP_DECRYPT_OK; 278 res = TKIP_DECRYPT_OK;
278 key->u.tkip.rx[queue].initialized = 1; 279 key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED;
279 goto done; 280 goto done;
280 } 281 }
281 282
282 if (!key->u.tkip.rx[queue].initialized || 283 if (key->u.tkip.rx[queue].state == TKIP_STATE_NOT_INIT ||
283 key->u.tkip.rx[queue].iv32 != iv32) { 284 key->u.tkip.rx[queue].iv32 != iv32) {
284 /* IV16 wrapped around - perform TKIP phase 1 */ 285 /* IV16 wrapped around - perform TKIP phase 1 */
285 tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32); 286 tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32);
@@ -299,18 +300,18 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
299 printk("\n"); 300 printk("\n");
300 } 301 }
301#endif 302#endif
302 if (key->local->ops->update_tkip_key && 303 }
303 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 304 if (key->local->ops->update_tkip_key &&
304 static const u8 bcast[ETH_ALEN] = 305 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
305 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 306 key->u.tkip.rx[queue].state != TKIP_STATE_PHASE1_HW_UPLOADED) {
306 const u8 *sta_addr = key->sta->sta.addr; 307 struct ieee80211_sub_if_data *sdata = key->sdata;
307 308
308 if (is_multicast_ether_addr(ra)) 309 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
309 sta_addr = bcast; 310 sdata = container_of(key->sdata->bss,
310 311 struct ieee80211_sub_if_data, u.ap);
311 drv_update_tkip_key(key->local, &key->conf, sta_addr, 312 drv_update_tkip_key(key->local, sdata, &key->conf, key->sta,
312 iv32, key->u.tkip.rx[queue].p1k); 313 iv32, key->u.tkip.rx[queue].p1k);
313 } 314 key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED;
314 } 315 }
315 316
316 tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key); 317 tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index ac210b58670..cbe53ed4fb0 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -180,6 +180,71 @@ static int inline is_ieee80211_device(struct ieee80211_local *local,
180} 180}
181 181
182/* tx handlers */ 182/* tx handlers */
183static ieee80211_tx_result debug_noinline
184ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
185{
186 struct ieee80211_local *local = tx->local;
187 struct ieee80211_if_managed *ifmgd;
188
189 /* driver doesn't support power save */
190 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
191 return TX_CONTINUE;
192
193 /* hardware does dynamic power save */
194 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
195 return TX_CONTINUE;
196
197 /* dynamic power save disabled */
198 if (local->hw.conf.dynamic_ps_timeout <= 0)
199 return TX_CONTINUE;
200
201 /* we are scanning, don't enable power save */
202 if (local->scanning)
203 return TX_CONTINUE;
204
205 if (!local->ps_sdata)
206 return TX_CONTINUE;
207
208 /* No point if we're going to suspend */
209 if (local->quiescing)
210 return TX_CONTINUE;
211
212 /* dynamic ps is supported only in managed mode */
213 if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
214 return TX_CONTINUE;
215
216 ifmgd = &tx->sdata->u.mgd;
217
218 /*
219 * Don't wakeup from power save if u-apsd is enabled, voip ac has
220 * u-apsd enabled and the frame is in voip class. This effectively
221 * means that even if all access categories have u-apsd enabled, in
222 * practise u-apsd is only used with the voip ac. This is a
223 * workaround for the case when received voip class packets do not
224 * have correct qos tag for some reason, due the network or the
225 * peer application.
226 *
227 * Note: local->uapsd_queues access is racy here. If the value is
228 * changed via debugfs, user needs to reassociate manually to have
229 * everything in sync.
230 */
231 if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
232 && (local->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
233 && skb_get_queue_mapping(tx->skb) == 0)
234 return TX_CONTINUE;
235
236 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
237 ieee80211_stop_queues_by_reason(&local->hw,
238 IEEE80211_QUEUE_STOP_REASON_PS);
239 ieee80211_queue_work(&local->hw,
240 &local->dynamic_ps_disable_work);
241 }
242
243 mod_timer(&local->dynamic_ps_timer, jiffies +
244 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
245
246 return TX_CONTINUE;
247}
183 248
184static ieee80211_tx_result debug_noinline 249static ieee80211_tx_result debug_noinline
185ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) 250ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
@@ -223,7 +288,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
223#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 288#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
224 printk(KERN_DEBUG "%s: dropped data frame to not " 289 printk(KERN_DEBUG "%s: dropped data frame to not "
225 "associated station %pM\n", 290 "associated station %pM\n",
226 tx->dev->name, hdr->addr1); 291 tx->sdata->name, hdr->addr1);
227#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 292#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
228 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc); 293 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
229 return TX_DROP; 294 return TX_DROP;
@@ -331,7 +396,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
331#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 396#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
332 if (net_ratelimit()) 397 if (net_ratelimit())
333 printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n", 398 printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n",
334 tx->dev->name); 399 tx->sdata->name);
335#endif 400#endif
336 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); 401 dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
337 } else 402 } else
@@ -391,7 +456,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
391 if (net_ratelimit()) { 456 if (net_ratelimit()) {
392 printk(KERN_DEBUG "%s: STA %pM TX " 457 printk(KERN_DEBUG "%s: STA %pM TX "
393 "buffer full - dropping oldest frame\n", 458 "buffer full - dropping oldest frame\n",
394 tx->dev->name, sta->sta.addr); 459 tx->sdata->name, sta->sta.addr);
395 } 460 }
396#endif 461#endif
397 dev_kfree_skb(old); 462 dev_kfree_skb(old);
@@ -416,7 +481,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
416#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 481#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
417 else if (unlikely(staflags & WLAN_STA_PS_STA)) { 482 else if (unlikely(staflags & WLAN_STA_PS_STA)) {
418 printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll " 483 printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll "
419 "set -> send frame\n", tx->dev->name, 484 "set -> send frame\n", tx->sdata->name,
420 sta->sta.addr); 485 sta->sta.addr);
421 } 486 }
422#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 487#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
@@ -464,6 +529,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
464 tx->key = NULL; 529 tx->key = NULL;
465 530
466 if (tx->key) { 531 if (tx->key) {
532 bool skip_hw = false;
533
467 tx->key->tx_rx_count++; 534 tx->key->tx_rx_count++;
468 /* TODO: add threshold stuff again */ 535 /* TODO: add threshold stuff again */
469 536
@@ -480,16 +547,32 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
480 !ieee80211_use_mfp(hdr->frame_control, tx->sta, 547 !ieee80211_use_mfp(hdr->frame_control, tx->sta,
481 tx->skb)) 548 tx->skb))
482 tx->key = NULL; 549 tx->key = NULL;
550 else
551 skip_hw = (tx->key->conf.flags &
552 IEEE80211_KEY_FLAG_SW_MGMT) &&
553 ieee80211_is_mgmt(hdr->frame_control);
483 break; 554 break;
484 case ALG_AES_CMAC: 555 case ALG_AES_CMAC:
485 if (!ieee80211_is_mgmt(hdr->frame_control)) 556 if (!ieee80211_is_mgmt(hdr->frame_control))
486 tx->key = NULL; 557 tx->key = NULL;
487 break; 558 break;
488 } 559 }
560
561 if (!skip_hw && tx->key &&
562 tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
563 info->control.hw_key = &tx->key->conf;
489 } 564 }
490 565
491 if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) 566 return TX_CONTINUE;
492 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 567}
568
569static ieee80211_tx_result debug_noinline
570ieee80211_tx_h_sta(struct ieee80211_tx_data *tx)
571{
572 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
573
574 if (tx->sta && tx->sta->uploaded)
575 info->control.sta = &tx->sta->sta;
493 576
494 return TX_CONTINUE; 577 return TX_CONTINUE;
495} 578}
@@ -519,7 +602,12 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
519 txrc.bss_conf = &tx->sdata->vif.bss_conf; 602 txrc.bss_conf = &tx->sdata->vif.bss_conf;
520 txrc.skb = tx->skb; 603 txrc.skb = tx->skb;
521 txrc.reported_rate.idx = -1; 604 txrc.reported_rate.idx = -1;
522 txrc.max_rate_idx = tx->sdata->max_ratectrl_rateidx; 605 txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[tx->channel->band];
606 if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
607 txrc.max_rate_idx = -1;
608 else
609 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
610 txrc.ap = tx->sdata->vif.type == NL80211_IFTYPE_AP;
523 611
524 /* set up RTS protection if desired */ 612 /* set up RTS protection if desired */
525 if (len > tx->local->hw.wiphy->rts_threshold) { 613 if (len > tx->local->hw.wiphy->rts_threshold) {
@@ -549,7 +637,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
549 "%s: Dropped data frame as no usable bitrate found while " 637 "%s: Dropped data frame as no usable bitrate found while "
550 "scanning and associated. Target station: " 638 "scanning and associated. Target station: "
551 "%pM on %d GHz band\n", 639 "%pM on %d GHz band\n",
552 tx->dev->name, hdr->addr1, 640 tx->sdata->name, hdr->addr1,
553 tx->channel->band ? 5 : 2)) 641 tx->channel->band ? 5 : 2))
554 return TX_DROP; 642 return TX_DROP;
555 643
@@ -664,17 +752,6 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
664} 752}
665 753
666static ieee80211_tx_result debug_noinline 754static ieee80211_tx_result debug_noinline
667ieee80211_tx_h_misc(struct ieee80211_tx_data *tx)
668{
669 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
670
671 if (tx->sta)
672 info->control.sta = &tx->sta->sta;
673
674 return TX_CONTINUE;
675}
676
677static ieee80211_tx_result debug_noinline
678ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) 755ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
679{ 756{
680 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 757 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
@@ -933,7 +1010,8 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
933 (struct ieee80211_radiotap_header *) skb->data; 1010 (struct ieee80211_radiotap_header *) skb->data;
934 struct ieee80211_supported_band *sband; 1011 struct ieee80211_supported_band *sband;
935 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1012 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
936 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len); 1013 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
1014 NULL);
937 1015
938 sband = tx->local->hw.wiphy->bands[tx->channel->band]; 1016 sband = tx->local->hw.wiphy->bands[tx->channel->band];
939 1017
@@ -969,7 +1047,7 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
969 * because it will be recomputed and added 1047 * because it will be recomputed and added
970 * on transmission 1048 * on transmission
971 */ 1049 */
972 if (skb->len < (iterator.max_length + FCS_LEN)) 1050 if (skb->len < (iterator._max_length + FCS_LEN))
973 return false; 1051 return false;
974 1052
975 skb_trim(skb, skb->len - FCS_LEN); 1053 skb_trim(skb, skb->len - FCS_LEN);
@@ -996,10 +1074,10 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
996 1074
997 /* 1075 /*
998 * remove the radiotap header 1076 * remove the radiotap header
999 * iterator->max_length was sanity-checked against 1077 * iterator->_max_length was sanity-checked against
1000 * skb->len by iterator init 1078 * skb->len by iterator init
1001 */ 1079 */
1002 skb_pull(skb, iterator.max_length); 1080 skb_pull(skb, iterator._max_length);
1003 1081
1004 return true; 1082 return true;
1005} 1083}
@@ -1021,7 +1099,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1021 1099
1022 memset(tx, 0, sizeof(*tx)); 1100 memset(tx, 0, sizeof(*tx));
1023 tx->skb = skb; 1101 tx->skb = skb;
1024 tx->dev = sdata->dev; /* use original interface */
1025 tx->local = local; 1102 tx->local = local;
1026 tx->sdata = sdata; 1103 tx->sdata = sdata;
1027 tx->channel = local->hw.conf.channel; 1104 tx->channel = local->hw.conf.channel;
@@ -1032,7 +1109,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1032 tx->flags |= IEEE80211_TX_FRAGMENTED; 1109 tx->flags |= IEEE80211_TX_FRAGMENTED;
1033 1110
1034 /* process and remove the injection radiotap header */ 1111 /* process and remove the injection radiotap header */
1035 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) { 1112 if (unlikely(info->flags & IEEE80211_TX_INTFL_HAS_RADIOTAP)) {
1036 if (!__ieee80211_parse_tx_radiotap(tx, skb)) 1113 if (!__ieee80211_parse_tx_radiotap(tx, skb))
1037 return TX_DROP; 1114 return TX_DROP;
1038 1115
@@ -1041,6 +1118,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1041 * the radiotap header that was present and pre-filled 1118 * the radiotap header that was present and pre-filled
1042 * 'tx' with tx control information. 1119 * 'tx' with tx control information.
1043 */ 1120 */
1121 info->flags &= ~IEEE80211_TX_INTFL_HAS_RADIOTAP;
1044 } 1122 }
1045 1123
1046 /* 1124 /*
@@ -1052,10 +1130,15 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1052 1130
1053 hdr = (struct ieee80211_hdr *) skb->data; 1131 hdr = (struct ieee80211_hdr *) skb->data;
1054 1132
1055 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1133 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
1056 tx->sta = rcu_dereference(sdata->u.vlan.sta); 1134 tx->sta = rcu_dereference(sdata->u.vlan.sta);
1135 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
1136 return TX_DROP;
1137 } else if (info->flags & IEEE80211_TX_CTL_INJECTED) {
1138 tx->sta = sta_info_get_bss(sdata, hdr->addr1);
1139 }
1057 if (!tx->sta) 1140 if (!tx->sta)
1058 tx->sta = sta_info_get(local, hdr->addr1); 1141 tx->sta = sta_info_get(sdata, hdr->addr1);
1059 1142
1060 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) && 1143 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
1061 (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) { 1144 (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
@@ -1207,6 +1290,7 @@ static int __ieee80211_tx(struct ieee80211_local *local,
1207static int invoke_tx_handlers(struct ieee80211_tx_data *tx) 1290static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1208{ 1291{
1209 struct sk_buff *skb = tx->skb; 1292 struct sk_buff *skb = tx->skb;
1293 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1210 ieee80211_tx_result res = TX_DROP; 1294 ieee80211_tx_result res = TX_DROP;
1211 1295
1212#define CALL_TXH(txh) \ 1296#define CALL_TXH(txh) \
@@ -1216,13 +1300,18 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1216 goto txh_done; \ 1300 goto txh_done; \
1217 } while (0) 1301 } while (0)
1218 1302
1303 CALL_TXH(ieee80211_tx_h_dynamic_ps);
1219 CALL_TXH(ieee80211_tx_h_check_assoc); 1304 CALL_TXH(ieee80211_tx_h_check_assoc);
1220 CALL_TXH(ieee80211_tx_h_ps_buf); 1305 CALL_TXH(ieee80211_tx_h_ps_buf);
1221 CALL_TXH(ieee80211_tx_h_select_key); 1306 CALL_TXH(ieee80211_tx_h_select_key);
1222 CALL_TXH(ieee80211_tx_h_michael_mic_add); 1307 CALL_TXH(ieee80211_tx_h_sta);
1223 if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)) 1308 if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
1224 CALL_TXH(ieee80211_tx_h_rate_ctrl); 1309 CALL_TXH(ieee80211_tx_h_rate_ctrl);
1225 CALL_TXH(ieee80211_tx_h_misc); 1310
1311 if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION))
1312 goto txh_done;
1313
1314 CALL_TXH(ieee80211_tx_h_michael_mic_add);
1226 CALL_TXH(ieee80211_tx_h_sequence); 1315 CALL_TXH(ieee80211_tx_h_sequence);
1227 CALL_TXH(ieee80211_tx_h_fragment); 1316 CALL_TXH(ieee80211_tx_h_fragment);
1228 /* handlers after fragment must be aware of tx info fragmentation! */ 1317 /* handlers after fragment must be aware of tx info fragmentation! */
@@ -1398,34 +1487,6 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
1398 return 0; 1487 return 0;
1399} 1488}
1400 1489
1401static bool need_dynamic_ps(struct ieee80211_local *local)
1402{
1403 /* driver doesn't support power save */
1404 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
1405 return false;
1406
1407 /* hardware does dynamic power save */
1408 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
1409 return false;
1410
1411 /* dynamic power save disabled */
1412 if (local->hw.conf.dynamic_ps_timeout <= 0)
1413 return false;
1414
1415 /* we are scanning, don't enable power save */
1416 if (local->scanning)
1417 return false;
1418
1419 if (!local->ps_sdata)
1420 return false;
1421
1422 /* No point if we're going to suspend */
1423 if (local->quiescing)
1424 return false;
1425
1426 return true;
1427}
1428
1429static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, 1490static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1430 struct sk_buff *skb) 1491 struct sk_buff *skb)
1431{ 1492{
@@ -1436,25 +1497,14 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1436 int headroom; 1497 int headroom;
1437 bool may_encrypt; 1498 bool may_encrypt;
1438 1499
1439 if (need_dynamic_ps(local)) {
1440 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
1441 ieee80211_stop_queues_by_reason(&local->hw,
1442 IEEE80211_QUEUE_STOP_REASON_PS);
1443 ieee80211_queue_work(&local->hw,
1444 &local->dynamic_ps_disable_work);
1445 }
1446
1447 mod_timer(&local->dynamic_ps_timer, jiffies +
1448 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
1449 }
1450
1451 rcu_read_lock(); 1500 rcu_read_lock();
1452 1501
1453 if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) { 1502 if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) {
1454 int hdrlen; 1503 int hdrlen;
1455 u16 len_rthdr; 1504 u16 len_rthdr;
1456 1505
1457 info->flags |= IEEE80211_TX_CTL_INJECTED; 1506 info->flags |= IEEE80211_TX_CTL_INJECTED |
1507 IEEE80211_TX_INTFL_HAS_RADIOTAP;
1458 1508
1459 len_rthdr = ieee80211_get_radiotap_len(skb->data); 1509 len_rthdr = ieee80211_get_radiotap_len(skb->data);
1460 hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr); 1510 hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
@@ -1474,11 +1524,11 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1474 1524
1475 list_for_each_entry_rcu(tmp_sdata, &local->interfaces, 1525 list_for_each_entry_rcu(tmp_sdata, &local->interfaces,
1476 list) { 1526 list) {
1477 if (!netif_running(tmp_sdata->dev)) 1527 if (!ieee80211_sdata_running(tmp_sdata))
1478 continue; 1528 continue;
1479 if (tmp_sdata->vif.type != NL80211_IFTYPE_AP) 1529 if (tmp_sdata->vif.type != NL80211_IFTYPE_AP)
1480 continue; 1530 continue;
1481 if (compare_ether_addr(tmp_sdata->dev->dev_addr, 1531 if (compare_ether_addr(tmp_sdata->vif.addr,
1482 hdr->addr2) == 0) { 1532 hdr->addr2) == 0) {
1483 sdata = tmp_sdata; 1533 sdata = tmp_sdata;
1484 break; 1534 break;
@@ -1642,7 +1692,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1642 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 1692 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1643 /* RA TA DA SA */ 1693 /* RA TA DA SA */
1644 memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN); 1694 memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN);
1645 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1695 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1646 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1696 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1647 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); 1697 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1648 hdrlen = 30; 1698 hdrlen = 30;
@@ -1656,7 +1706,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1656 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); 1706 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
1657 /* DA BSSID SA */ 1707 /* DA BSSID SA */
1658 memcpy(hdr.addr1, skb->data, ETH_ALEN); 1708 memcpy(hdr.addr1, skb->data, ETH_ALEN);
1659 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1709 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1660 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); 1710 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
1661 hdrlen = 24; 1711 hdrlen = 24;
1662 break; 1712 break;
@@ -1664,7 +1714,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1664 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 1714 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1665 /* RA TA DA SA */ 1715 /* RA TA DA SA */
1666 memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN); 1716 memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN);
1667 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1717 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1668 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1718 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1669 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); 1719 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1670 hdrlen = 30; 1720 hdrlen = 30;
@@ -1678,8 +1728,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1678 goto fail; 1728 goto fail;
1679 } 1729 }
1680 1730
1681 if (compare_ether_addr(dev->dev_addr, 1731 if (compare_ether_addr(sdata->vif.addr,
1682 skb->data + ETH_ALEN) == 0) { 1732 skb->data + ETH_ALEN) == 0) {
1683 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, 1733 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1684 skb->data, skb->data + ETH_ALEN); 1734 skb->data, skb->data + ETH_ALEN);
1685 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, 1735 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
@@ -1709,7 +1759,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1709 } 1759 }
1710 } 1760 }
1711 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, 1761 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1712 mesh_da, dev->dev_addr); 1762 mesh_da, sdata->vif.addr);
1713 rcu_read_unlock(); 1763 rcu_read_unlock();
1714 if (is_mesh_mcast) 1764 if (is_mesh_mcast)
1715 meshhdrlen = 1765 meshhdrlen =
@@ -1734,7 +1784,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1734 if (sdata->u.mgd.use_4addr && ethertype != ETH_P_PAE) { 1784 if (sdata->u.mgd.use_4addr && ethertype != ETH_P_PAE) {
1735 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 1785 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1736 /* RA TA DA SA */ 1786 /* RA TA DA SA */
1737 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1787 memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
1738 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1788 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1739 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); 1789 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1740 hdrlen = 30; 1790 hdrlen = 30;
@@ -1765,9 +1815,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1765 */ 1815 */
1766 if (!is_multicast_ether_addr(hdr.addr1)) { 1816 if (!is_multicast_ether_addr(hdr.addr1)) {
1767 rcu_read_lock(); 1817 rcu_read_lock();
1768 sta = sta_info_get(local, hdr.addr1); 1818 sta = sta_info_get(sdata, hdr.addr1);
1769 /* XXX: in the future, use sdata to look up the sta */ 1819 if (sta)
1770 if (sta && sta->sdata == sdata)
1771 sta_flags = get_sta_flags(sta); 1820 sta_flags = get_sta_flags(sta);
1772 rcu_read_unlock(); 1821 rcu_read_unlock();
1773 } 1822 }
@@ -1786,7 +1835,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1786 unlikely(!is_multicast_ether_addr(hdr.addr1) && 1835 unlikely(!is_multicast_ether_addr(hdr.addr1) &&
1787 !(sta_flags & WLAN_STA_AUTHORIZED) && 1836 !(sta_flags & WLAN_STA_AUTHORIZED) &&
1788 !(ethertype == ETH_P_PAE && 1837 !(ethertype == ETH_P_PAE &&
1789 compare_ether_addr(dev->dev_addr, 1838 compare_ether_addr(sdata->vif.addr,
1790 skb->data + ETH_ALEN) == 0))) { 1839 skb->data + ETH_ALEN) == 0))) {
1791#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1840#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1792 if (net_ratelimit()) 1841 if (net_ratelimit())
@@ -1926,7 +1975,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
1926 ieee80211_tx(sdata, skb, true); 1975 ieee80211_tx(sdata, skb, true);
1927 } else { 1976 } else {
1928 hdr = (struct ieee80211_hdr *)skb->data; 1977 hdr = (struct ieee80211_hdr *)skb->data;
1929 sta = sta_info_get(local, hdr->addr1); 1978 sta = sta_info_get(sdata, hdr->addr1);
1930 1979
1931 ret = __ieee80211_tx(local, &skb, sta, true); 1980 ret = __ieee80211_tx(local, &skb, sta, true);
1932 if (ret != IEEE80211_TX_OK) 1981 if (ret != IEEE80211_TX_OK)
@@ -2062,6 +2111,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2062 struct beacon_data *beacon; 2111 struct beacon_data *beacon;
2063 struct ieee80211_supported_band *sband; 2112 struct ieee80211_supported_band *sband;
2064 enum ieee80211_band band = local->hw.conf.channel->band; 2113 enum ieee80211_band band = local->hw.conf.channel->band;
2114 struct ieee80211_tx_rate_control txrc;
2065 2115
2066 sband = local->hw.wiphy->bands[band]; 2116 sband = local->hw.wiphy->bands[band];
2067 2117
@@ -2150,8 +2200,8 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2150 mgmt->frame_control = 2200 mgmt->frame_control =
2151 cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); 2201 cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
2152 memset(mgmt->da, 0xff, ETH_ALEN); 2202 memset(mgmt->da, 0xff, ETH_ALEN);
2153 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 2203 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
2154 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN); 2204 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
2155 mgmt->u.beacon.beacon_int = 2205 mgmt->u.beacon.beacon_int =
2156 cpu_to_le16(sdata->vif.bss_conf.beacon_int); 2206 cpu_to_le16(sdata->vif.bss_conf.beacon_int);
2157 mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */ 2207 mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */
@@ -2169,21 +2219,25 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2169 info = IEEE80211_SKB_CB(skb); 2219 info = IEEE80211_SKB_CB(skb);
2170 2220
2171 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 2221 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
2222 info->flags |= IEEE80211_TX_CTL_NO_ACK;
2172 info->band = band; 2223 info->band = band;
2173 /* 2224
2174 * XXX: For now, always use the lowest rate 2225 memset(&txrc, 0, sizeof(txrc));
2175 */ 2226 txrc.hw = hw;
2176 info->control.rates[0].idx = 0; 2227 txrc.sband = sband;
2177 info->control.rates[0].count = 1; 2228 txrc.bss_conf = &sdata->vif.bss_conf;
2178 info->control.rates[1].idx = -1; 2229 txrc.skb = skb;
2179 info->control.rates[2].idx = -1; 2230 txrc.reported_rate.idx = -1;
2180 info->control.rates[3].idx = -1; 2231 txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
2181 info->control.rates[4].idx = -1; 2232 if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
2182 BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5); 2233 txrc.max_rate_idx = -1;
2234 else
2235 txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
2236 txrc.ap = true;
2237 rate_control_get_rate(sdata, NULL, &txrc);
2183 2238
2184 info->control.vif = vif; 2239 info->control.vif = vif;
2185 2240
2186 info->flags |= IEEE80211_TX_CTL_NO_ACK;
2187 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 2241 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
2188 info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; 2242 info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
2189 out: 2243 out:
@@ -2192,6 +2246,134 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2192} 2246}
2193EXPORT_SYMBOL(ieee80211_beacon_get_tim); 2247EXPORT_SYMBOL(ieee80211_beacon_get_tim);
2194 2248
2249struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
2250 struct ieee80211_vif *vif)
2251{
2252 struct ieee80211_sub_if_data *sdata;
2253 struct ieee80211_if_managed *ifmgd;
2254 struct ieee80211_pspoll *pspoll;
2255 struct ieee80211_local *local;
2256 struct sk_buff *skb;
2257
2258 if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
2259 return NULL;
2260
2261 sdata = vif_to_sdata(vif);
2262 ifmgd = &sdata->u.mgd;
2263 local = sdata->local;
2264
2265 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
2266 if (!skb) {
2267 printk(KERN_DEBUG "%s: failed to allocate buffer for "
2268 "pspoll template\n", sdata->name);
2269 return NULL;
2270 }
2271 skb_reserve(skb, local->hw.extra_tx_headroom);
2272
2273 pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll));
2274 memset(pspoll, 0, sizeof(*pspoll));
2275 pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
2276 IEEE80211_STYPE_PSPOLL);
2277 pspoll->aid = cpu_to_le16(ifmgd->aid);
2278
2279 /* aid in PS-Poll has its two MSBs each set to 1 */
2280 pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);
2281
2282 memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
2283 memcpy(pspoll->ta, vif->addr, ETH_ALEN);
2284
2285 return skb;
2286}
2287EXPORT_SYMBOL(ieee80211_pspoll_get);
2288
2289struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
2290 struct ieee80211_vif *vif)
2291{
2292 struct ieee80211_hdr_3addr *nullfunc;
2293 struct ieee80211_sub_if_data *sdata;
2294 struct ieee80211_if_managed *ifmgd;
2295 struct ieee80211_local *local;
2296 struct sk_buff *skb;
2297
2298 if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
2299 return NULL;
2300
2301 sdata = vif_to_sdata(vif);
2302 ifmgd = &sdata->u.mgd;
2303 local = sdata->local;
2304
2305 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
2306 if (!skb) {
2307 printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
2308 "template\n", sdata->name);
2309 return NULL;
2310 }
2311 skb_reserve(skb, local->hw.extra_tx_headroom);
2312
2313 nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb,
2314 sizeof(*nullfunc));
2315 memset(nullfunc, 0, sizeof(*nullfunc));
2316 nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
2317 IEEE80211_STYPE_NULLFUNC |
2318 IEEE80211_FCTL_TODS);
2319 memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
2320 memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
2321 memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
2322
2323 return skb;
2324}
2325EXPORT_SYMBOL(ieee80211_nullfunc_get);
2326
2327struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
2328 struct ieee80211_vif *vif,
2329 const u8 *ssid, size_t ssid_len,
2330 const u8 *ie, size_t ie_len)
2331{
2332 struct ieee80211_sub_if_data *sdata;
2333 struct ieee80211_local *local;
2334 struct ieee80211_hdr_3addr *hdr;
2335 struct sk_buff *skb;
2336 size_t ie_ssid_len;
2337 u8 *pos;
2338
2339 sdata = vif_to_sdata(vif);
2340 local = sdata->local;
2341 ie_ssid_len = 2 + ssid_len;
2342
2343 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
2344 ie_ssid_len + ie_len);
2345 if (!skb) {
2346 printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
2347 "request template\n", sdata->name);
2348 return NULL;
2349 }
2350
2351 skb_reserve(skb, local->hw.extra_tx_headroom);
2352
2353 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
2354 memset(hdr, 0, sizeof(*hdr));
2355 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2356 IEEE80211_STYPE_PROBE_REQ);
2357 memset(hdr->addr1, 0xff, ETH_ALEN);
2358 memcpy(hdr->addr2, vif->addr, ETH_ALEN);
2359 memset(hdr->addr3, 0xff, ETH_ALEN);
2360
2361 pos = skb_put(skb, ie_ssid_len);
2362 *pos++ = WLAN_EID_SSID;
2363 *pos++ = ssid_len;
2364 if (ssid)
2365 memcpy(pos, ssid, ssid_len);
2366 pos += ssid_len;
2367
2368 if (ie) {
2369 pos = skb_put(skb, ie_len);
2370 memcpy(pos, ie, ie_len);
2371 }
2372
2373 return skb;
2374}
2375EXPORT_SYMBOL(ieee80211_probereq_get);
2376
2195void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 2377void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2196 const void *frame, size_t frame_len, 2378 const void *frame, size_t frame_len,
2197 const struct ieee80211_tx_info *frame_txctl, 2379 const struct ieee80211_tx_info *frame_txctl,
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 3848140313f..c453226f06b 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -18,7 +18,6 @@
18#include <linux/skbuff.h> 18#include <linux/skbuff.h>
19#include <linux/etherdevice.h> 19#include <linux/etherdevice.h>
20#include <linux/if_arp.h> 20#include <linux/if_arp.h>
21#include <linux/wireless.h>
22#include <linux/bitmap.h> 21#include <linux/bitmap.h>
23#include <linux/crc32.h> 22#include <linux/crc32.h>
24#include <net/net_namespace.h> 23#include <net/net_namespace.h>
@@ -480,8 +479,8 @@ void ieee80211_iterate_active_interfaces(
480 case NL80211_IFTYPE_MESH_POINT: 479 case NL80211_IFTYPE_MESH_POINT:
481 break; 480 break;
482 } 481 }
483 if (netif_running(sdata->dev)) 482 if (ieee80211_sdata_running(sdata))
484 iterator(data, sdata->dev->dev_addr, 483 iterator(data, sdata->vif.addr,
485 &sdata->vif); 484 &sdata->vif);
486 } 485 }
487 486
@@ -514,8 +513,8 @@ void ieee80211_iterate_active_interfaces_atomic(
514 case NL80211_IFTYPE_MESH_POINT: 513 case NL80211_IFTYPE_MESH_POINT:
515 break; 514 break;
516 } 515 }
517 if (netif_running(sdata->dev)) 516 if (ieee80211_sdata_running(sdata))
518 iterator(data, sdata->dev->dev_addr, 517 iterator(data, sdata->vif.addr,
519 &sdata->vif); 518 &sdata->vif);
520 } 519 }
521 520
@@ -793,6 +792,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
793 break; 792 break;
794 } 793 }
795 794
795 qparam.uapsd = false;
796
796 drv_conf_tx(local, queue, &qparam); 797 drv_conf_tx(local, queue, &qparam);
797 } 798 }
798} 799}
@@ -860,7 +861,7 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
860 sizeof(*mgmt) + 6 + extra_len); 861 sizeof(*mgmt) + 6 + extra_len);
861 if (!skb) { 862 if (!skb) {
862 printk(KERN_DEBUG "%s: failed to allocate buffer for auth " 863 printk(KERN_DEBUG "%s: failed to allocate buffer for auth "
863 "frame\n", sdata->dev->name); 864 "frame\n", sdata->name);
864 return; 865 return;
865 } 866 }
866 skb_reserve(skb, local->hw.extra_tx_headroom); 867 skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -870,7 +871,7 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
870 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 871 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
871 IEEE80211_STYPE_AUTH); 872 IEEE80211_STYPE_AUTH);
872 memcpy(mgmt->da, bssid, ETH_ALEN); 873 memcpy(mgmt->da, bssid, ETH_ALEN);
873 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 874 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
874 memcpy(mgmt->bssid, bssid, ETH_ALEN); 875 memcpy(mgmt->bssid, bssid, ETH_ALEN);
875 mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg); 876 mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg);
876 mgmt->u.auth.auth_transaction = cpu_to_le16(transaction); 877 mgmt->u.auth.auth_transaction = cpu_to_le16(transaction);
@@ -893,43 +894,87 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
893 enum ieee80211_band band) 894 enum ieee80211_band band)
894{ 895{
895 struct ieee80211_supported_band *sband; 896 struct ieee80211_supported_band *sband;
896 u8 *pos, *supp_rates_len, *esupp_rates_len = NULL; 897 u8 *pos;
897 int i; 898 size_t offset = 0, noffset;
899 int supp_rates_len, i;
898 900
899 sband = local->hw.wiphy->bands[band]; 901 sband = local->hw.wiphy->bands[band];
900 902
901 pos = buffer; 903 pos = buffer;
902 904
905 supp_rates_len = min_t(int, sband->n_bitrates, 8);
906
903 *pos++ = WLAN_EID_SUPP_RATES; 907 *pos++ = WLAN_EID_SUPP_RATES;
904 supp_rates_len = pos; 908 *pos++ = supp_rates_len;
905 *pos++ = 0; 909
906 910 for (i = 0; i < supp_rates_len; i++) {
907 for (i = 0; i < sband->n_bitrates; i++) { 911 int rate = sband->bitrates[i].bitrate;
908 struct ieee80211_rate *rate = &sband->bitrates[i]; 912 *pos++ = (u8) (rate / 5);
909 913 }
910 if (esupp_rates_len) { 914
911 *esupp_rates_len += 1; 915 /* insert "request information" if in custom IEs */
912 } else if (*supp_rates_len == 8) { 916 if (ie && ie_len) {
913 *pos++ = WLAN_EID_EXT_SUPP_RATES; 917 static const u8 before_extrates[] = {
914 esupp_rates_len = pos; 918 WLAN_EID_SSID,
915 *pos++ = 1; 919 WLAN_EID_SUPP_RATES,
916 } else 920 WLAN_EID_REQUEST,
917 *supp_rates_len += 1; 921 };
922 noffset = ieee80211_ie_split(ie, ie_len,
923 before_extrates,
924 ARRAY_SIZE(before_extrates),
925 offset);
926 memcpy(pos, ie + offset, noffset - offset);
927 pos += noffset - offset;
928 offset = noffset;
929 }
930
931 if (sband->n_bitrates > i) {
932 *pos++ = WLAN_EID_EXT_SUPP_RATES;
933 *pos++ = sband->n_bitrates - i;
934
935 for (; i < sband->n_bitrates; i++) {
936 int rate = sband->bitrates[i].bitrate;
937 *pos++ = (u8) (rate / 5);
938 }
939 }
918 940
919 *pos++ = rate->bitrate / 5; 941 /* insert custom IEs that go before HT */
942 if (ie && ie_len) {
943 static const u8 before_ht[] = {
944 WLAN_EID_SSID,
945 WLAN_EID_SUPP_RATES,
946 WLAN_EID_REQUEST,
947 WLAN_EID_EXT_SUPP_RATES,
948 WLAN_EID_DS_PARAMS,
949 WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
950 };
951 noffset = ieee80211_ie_split(ie, ie_len,
952 before_ht, ARRAY_SIZE(before_ht),
953 offset);
954 memcpy(pos, ie + offset, noffset - offset);
955 pos += noffset - offset;
956 offset = noffset;
920 } 957 }
921 958
922 if (sband->ht_cap.ht_supported) { 959 if (sband->ht_cap.ht_supported) {
923 __le16 tmp = cpu_to_le16(sband->ht_cap.cap); 960 u16 cap = sband->ht_cap.cap;
961 __le16 tmp;
962
963 if (ieee80211_disable_40mhz_24ghz &&
964 sband->band == IEEE80211_BAND_2GHZ) {
965 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
966 cap &= ~IEEE80211_HT_CAP_SGI_40;
967 }
924 968
925 *pos++ = WLAN_EID_HT_CAPABILITY; 969 *pos++ = WLAN_EID_HT_CAPABILITY;
926 *pos++ = sizeof(struct ieee80211_ht_cap); 970 *pos++ = sizeof(struct ieee80211_ht_cap);
927 memset(pos, 0, sizeof(struct ieee80211_ht_cap)); 971 memset(pos, 0, sizeof(struct ieee80211_ht_cap));
972 tmp = cpu_to_le16(cap);
928 memcpy(pos, &tmp, sizeof(u16)); 973 memcpy(pos, &tmp, sizeof(u16));
929 pos += sizeof(u16); 974 pos += sizeof(u16);
930 /* TODO: needs a define here for << 2 */
931 *pos++ = sband->ht_cap.ampdu_factor | 975 *pos++ = sband->ht_cap.ampdu_factor |
932 (sband->ht_cap.ampdu_density << 2); 976 (sband->ht_cap.ampdu_density <<
977 IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
933 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs)); 978 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
934 pos += sizeof(sband->ht_cap.mcs); 979 pos += sizeof(sband->ht_cap.mcs);
935 pos += 2 + 4 + 1; /* ext info, BF cap, antsel */ 980 pos += 2 + 4 + 1; /* ext info, BF cap, antsel */
@@ -940,9 +985,11 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
940 * that calculates local->scan_ies_len. 985 * that calculates local->scan_ies_len.
941 */ 986 */
942 987
943 if (ie) { 988 /* add any remaining custom IEs */
944 memcpy(pos, ie, ie_len); 989 if (ie && ie_len) {
945 pos += ie_len; 990 noffset = ie_len;
991 memcpy(pos, ie + offset, noffset - offset);
992 pos += noffset - offset;
946 } 993 }
947 994
948 return pos - buffer; 995 return pos - buffer;
@@ -955,40 +1002,33 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
955 struct ieee80211_local *local = sdata->local; 1002 struct ieee80211_local *local = sdata->local;
956 struct sk_buff *skb; 1003 struct sk_buff *skb;
957 struct ieee80211_mgmt *mgmt; 1004 struct ieee80211_mgmt *mgmt;
958 u8 *pos; 1005 size_t buf_len;
959 1006 u8 *buf;
960 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200 + 1007
961 ie_len); 1008 /* FIXME: come up with a proper value */
962 if (!skb) { 1009 buf = kmalloc(200 + ie_len, GFP_KERNEL);
963 printk(KERN_DEBUG "%s: failed to allocate buffer for probe " 1010 if (!buf) {
964 "request\n", sdata->dev->name); 1011 printk(KERN_DEBUG "%s: failed to allocate temporary IE "
1012 "buffer\n", sdata->name);
965 return; 1013 return;
966 } 1014 }
967 skb_reserve(skb, local->hw.extra_tx_headroom);
968 1015
969 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 1016 buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len,
970 memset(mgmt, 0, 24); 1017 local->hw.conf.channel->band);
971 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 1018
972 IEEE80211_STYPE_PROBE_REQ); 1019 skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
973 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN); 1020 ssid, ssid_len,
1021 buf, buf_len);
1022
974 if (dst) { 1023 if (dst) {
1024 mgmt = (struct ieee80211_mgmt *) skb->data;
975 memcpy(mgmt->da, dst, ETH_ALEN); 1025 memcpy(mgmt->da, dst, ETH_ALEN);
976 memcpy(mgmt->bssid, dst, ETH_ALEN); 1026 memcpy(mgmt->bssid, dst, ETH_ALEN);
977 } else {
978 memset(mgmt->da, 0xff, ETH_ALEN);
979 memset(mgmt->bssid, 0xff, ETH_ALEN);
980 } 1027 }
981 pos = skb_put(skb, 2 + ssid_len);
982 *pos++ = WLAN_EID_SSID;
983 *pos++ = ssid_len;
984 memcpy(pos, ssid, ssid_len);
985 pos += ssid_len;
986
987 skb_put(skb, ieee80211_build_preq_ies(local, pos, ie, ie_len,
988 local->hw.conf.channel->band));
989 1028
990 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 1029 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
991 ieee80211_tx_skb(sdata, skb); 1030 ieee80211_tx_skb(sdata, skb);
1031 kfree(buf);
992} 1032}
993 1033
994u32 ieee80211_sta_get_rates(struct ieee80211_local *local, 1034u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
@@ -1032,18 +1072,16 @@ void ieee80211_stop_device(struct ieee80211_local *local)
1032 ieee80211_led_radio(local, false); 1072 ieee80211_led_radio(local, false);
1033 1073
1034 cancel_work_sync(&local->reconfig_filter); 1074 cancel_work_sync(&local->reconfig_filter);
1035 drv_stop(local);
1036 1075
1037 flush_workqueue(local->workqueue); 1076 flush_workqueue(local->workqueue);
1077 drv_stop(local);
1038} 1078}
1039 1079
1040int ieee80211_reconfig(struct ieee80211_local *local) 1080int ieee80211_reconfig(struct ieee80211_local *local)
1041{ 1081{
1042 struct ieee80211_hw *hw = &local->hw; 1082 struct ieee80211_hw *hw = &local->hw;
1043 struct ieee80211_sub_if_data *sdata; 1083 struct ieee80211_sub_if_data *sdata;
1044 struct ieee80211_if_init_conf conf;
1045 struct sta_info *sta; 1084 struct sta_info *sta;
1046 unsigned long flags;
1047 int res; 1085 int res;
1048 1086
1049 if (local->suspended) 1087 if (local->suspended)
@@ -1061,7 +1099,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1061 if (res) { 1099 if (res) {
1062 WARN(local->suspended, "Harware became unavailable " 1100 WARN(local->suspended, "Harware became unavailable "
1063 "upon resume. This is could be a software issue" 1101 "upon resume. This is could be a software issue"
1064 "prior to suspend or a harware issue\n"); 1102 "prior to suspend or a hardware issue\n");
1065 return res; 1103 return res;
1066 } 1104 }
1067 1105
@@ -1072,29 +1110,24 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1072 list_for_each_entry(sdata, &local->interfaces, list) { 1110 list_for_each_entry(sdata, &local->interfaces, list) {
1073 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 1111 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
1074 sdata->vif.type != NL80211_IFTYPE_MONITOR && 1112 sdata->vif.type != NL80211_IFTYPE_MONITOR &&
1075 netif_running(sdata->dev)) { 1113 ieee80211_sdata_running(sdata))
1076 conf.vif = &sdata->vif; 1114 res = drv_add_interface(local, &sdata->vif);
1077 conf.type = sdata->vif.type;
1078 conf.mac_addr = sdata->dev->dev_addr;
1079 res = drv_add_interface(local, &conf);
1080 }
1081 } 1115 }
1082 1116
1083 /* add STAs back */ 1117 /* add STAs back */
1084 if (local->ops->sta_notify) { 1118 mutex_lock(&local->sta_mtx);
1085 spin_lock_irqsave(&local->sta_lock, flags); 1119 list_for_each_entry(sta, &local->sta_list, list) {
1086 list_for_each_entry(sta, &local->sta_list, list) { 1120 if (sta->uploaded) {
1087 sdata = sta->sdata; 1121 sdata = sta->sdata;
1088 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1122 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1089 sdata = container_of(sdata->bss, 1123 sdata = container_of(sdata->bss,
1090 struct ieee80211_sub_if_data, 1124 struct ieee80211_sub_if_data,
1091 u.ap); 1125 u.ap);
1092 1126
1093 drv_sta_notify(local, &sdata->vif, STA_NOTIFY_ADD, 1127 WARN_ON(drv_sta_add(local, sdata, &sta->sta));
1094 &sta->sta);
1095 } 1128 }
1096 spin_unlock_irqrestore(&local->sta_lock, flags);
1097 } 1129 }
1130 mutex_unlock(&local->sta_mtx);
1098 1131
1099 /* Clear Suspend state so that ADDBA requests can be processed */ 1132 /* Clear Suspend state so that ADDBA requests can be processed */
1100 1133
@@ -1119,7 +1152,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1119 /* Finally also reconfigure all the BSS information */ 1152 /* Finally also reconfigure all the BSS information */
1120 list_for_each_entry(sdata, &local->interfaces, list) { 1153 list_for_each_entry(sdata, &local->interfaces, list) {
1121 u32 changed = ~0; 1154 u32 changed = ~0;
1122 if (!netif_running(sdata->dev)) 1155 if (!ieee80211_sdata_running(sdata))
1123 continue; 1156 continue;
1124 switch (sdata->vif.type) { 1157 switch (sdata->vif.type) {
1125 case NL80211_IFTYPE_STATION: 1158 case NL80211_IFTYPE_STATION:
@@ -1145,9 +1178,17 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1145 } 1178 }
1146 } 1179 }
1147 1180
1181 rcu_read_lock();
1182 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
1183 list_for_each_entry_rcu(sta, &local->sta_list, list) {
1184 ieee80211_sta_tear_down_BA_sessions(sta);
1185 }
1186 }
1187 rcu_read_unlock();
1188
1148 /* add back keys */ 1189 /* add back keys */
1149 list_for_each_entry(sdata, &local->interfaces, list) 1190 list_for_each_entry(sdata, &local->interfaces, list)
1150 if (netif_running(sdata->dev)) 1191 if (ieee80211_sdata_running(sdata))
1151 ieee80211_enable_keys(sdata); 1192 ieee80211_enable_keys(sdata);
1152 1193
1153 ieee80211_wake_queues_by_reason(hw, 1194 ieee80211_wake_queues_by_reason(hw,
@@ -1184,13 +1225,143 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1184 1225
1185 add_timer(&local->sta_cleanup); 1226 add_timer(&local->sta_cleanup);
1186 1227
1187 spin_lock_irqsave(&local->sta_lock, flags); 1228 mutex_lock(&local->sta_mtx);
1188 list_for_each_entry(sta, &local->sta_list, list) 1229 list_for_each_entry(sta, &local->sta_list, list)
1189 mesh_plink_restart(sta); 1230 mesh_plink_restart(sta);
1190 spin_unlock_irqrestore(&local->sta_lock, flags); 1231 mutex_unlock(&local->sta_mtx);
1191#else 1232#else
1192 WARN_ON(1); 1233 WARN_ON(1);
1193#endif 1234#endif
1194 return 0; 1235 return 0;
1195} 1236}
1196 1237
1238static int check_mgd_smps(struct ieee80211_if_managed *ifmgd,
1239 enum ieee80211_smps_mode *smps_mode)
1240{
1241 if (ifmgd->associated) {
1242 *smps_mode = ifmgd->ap_smps;
1243
1244 if (*smps_mode == IEEE80211_SMPS_AUTOMATIC) {
1245 if (ifmgd->powersave)
1246 *smps_mode = IEEE80211_SMPS_DYNAMIC;
1247 else
1248 *smps_mode = IEEE80211_SMPS_OFF;
1249 }
1250
1251 return 1;
1252 }
1253
1254 return 0;
1255}
1256
1257/* must hold iflist_mtx */
1258void ieee80211_recalc_smps(struct ieee80211_local *local,
1259 struct ieee80211_sub_if_data *forsdata)
1260{
1261 struct ieee80211_sub_if_data *sdata;
1262 enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_OFF;
1263 int count = 0;
1264
1265 if (forsdata)
1266 WARN_ON(!mutex_is_locked(&forsdata->u.mgd.mtx));
1267
1268 WARN_ON(!mutex_is_locked(&local->iflist_mtx));
1269
1270 /*
1271 * This function could be improved to handle multiple
1272 * interfaces better, but right now it makes any
1273 * non-station interfaces force SM PS to be turned
1274 * off. If there are multiple station interfaces it
1275 * could also use the best possible mode, e.g. if
1276 * one is in static and the other in dynamic then
1277 * dynamic is ok.
1278 */
1279
1280 list_for_each_entry(sdata, &local->interfaces, list) {
1281 if (!netif_running(sdata->dev))
1282 continue;
1283 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1284 goto set;
1285 if (sdata != forsdata) {
1286 /*
1287 * This nested is ok -- we are holding the iflist_mtx
1288 * so can't get here twice or so. But it's required
1289 * since normally we acquire it first and then the
1290 * iflist_mtx.
1291 */
1292 mutex_lock_nested(&sdata->u.mgd.mtx, SINGLE_DEPTH_NESTING);
1293 count += check_mgd_smps(&sdata->u.mgd, &smps_mode);
1294 mutex_unlock(&sdata->u.mgd.mtx);
1295 } else
1296 count += check_mgd_smps(&sdata->u.mgd, &smps_mode);
1297
1298 if (count > 1) {
1299 smps_mode = IEEE80211_SMPS_OFF;
1300 break;
1301 }
1302 }
1303
1304 if (smps_mode == local->smps_mode)
1305 return;
1306
1307 set:
1308 local->smps_mode = smps_mode;
1309 /* changed flag is auto-detected for this */
1310 ieee80211_hw_config(local, 0);
1311}
1312
1313static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id)
1314{
1315 int i;
1316
1317 for (i = 0; i < n_ids; i++)
1318 if (ids[i] == id)
1319 return true;
1320 return false;
1321}
1322
1323/**
1324 * ieee80211_ie_split - split an IE buffer according to ordering
1325 *
1326 * @ies: the IE buffer
1327 * @ielen: the length of the IE buffer
1328 * @ids: an array with element IDs that are allowed before
1329 * the split
1330 * @n_ids: the size of the element ID array
1331 * @offset: offset where to start splitting in the buffer
1332 *
1333 * This function splits an IE buffer by updating the @offset
1334 * variable to point to the location where the buffer should be
1335 * split.
1336 *
1337 * It assumes that the given IE buffer is well-formed, this
1338 * has to be guaranteed by the caller!
1339 *
1340 * It also assumes that the IEs in the buffer are ordered
1341 * correctly, if not the result of using this function will not
1342 * be ordered correctly either, i.e. it does no reordering.
1343 *
1344 * The function returns the offset where the next part of the
1345 * buffer starts, which may be @ielen if the entire (remainder)
1346 * of the buffer should be used.
1347 */
1348size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
1349 const u8 *ids, int n_ids, size_t offset)
1350{
1351 size_t pos = offset;
1352
1353 while (pos < ielen && ieee80211_id_in_list(ids, n_ids, ies[pos]))
1354 pos += 2 + ies[pos + 1];
1355
1356 return pos;
1357}
1358
1359size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset)
1360{
1361 size_t pos = offset;
1362
1363 while (pos < ielen && ies[pos] != WLAN_EID_VENDOR_SPECIFIC)
1364 pos += 2 + ies[pos + 1];
1365
1366 return pos;
1367}
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 247123fe1a7..5d745f2d723 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -305,20 +305,19 @@ static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
305{ 305{
306 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 306 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
307 307
308 if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) { 308 if (!info->control.hw_key) {
309 if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key, 309 if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key,
310 tx->key->conf.keylen, 310 tx->key->conf.keylen,
311 tx->key->conf.keyidx)) 311 tx->key->conf.keyidx))
312 return -1; 312 return -1;
313 } else { 313 } else if (info->control.hw_key->flags &
314 info->control.hw_key = &tx->key->conf; 314 IEEE80211_KEY_FLAG_GENERATE_IV) {
315 if (tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) { 315 if (!ieee80211_wep_add_iv(tx->local, skb,
316 if (!ieee80211_wep_add_iv(tx->local, skb, 316 tx->key->conf.keylen,
317 tx->key->conf.keylen, 317 tx->key->conf.keyidx))
318 tx->key->conf.keyidx)) 318 return -1;
319 return -1;
320 }
321 } 319 }
320
322 return 0; 321 return 0;
323} 322}
324 323
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 79d887dae73..34e6d02da77 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -96,7 +96,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
96 } 96 }
97 97
98 if (!sta && ra && !is_multicast_ether_addr(ra)) { 98 if (!sta && ra && !is_multicast_ether_addr(ra)) {
99 sta = sta_info_get(local, ra); 99 sta = sta_info_get(sdata, ra);
100 if (sta) 100 if (sta)
101 sta_flags = get_sta_flags(sta); 101 sta_flags = get_sta_flags(sta);
102 } 102 }
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
new file mode 100644
index 00000000000..7e708d5c88b
--- /dev/null
+++ b/net/mac80211/work.c
@@ -0,0 +1,1099 @@
1/*
2 * mac80211 work implementation
3 *
4 * Copyright 2003-2008, Jouni Malinen <j@w1.fi>
5 * Copyright 2004, Instant802 Networks, Inc.
6 * Copyright 2005, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/delay.h>
17#include <linux/if_ether.h>
18#include <linux/skbuff.h>
19#include <linux/if_arp.h>
20#include <linux/etherdevice.h>
21#include <linux/crc32.h>
22#include <net/mac80211.h>
23#include <asm/unaligned.h>
24
25#include "ieee80211_i.h"
26#include "rate.h"
27
28#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
29#define IEEE80211_AUTH_MAX_TRIES 3
30#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
31#define IEEE80211_ASSOC_MAX_TRIES 3
32#define IEEE80211_MAX_PROBE_TRIES 5
33
34enum work_action {
35 WORK_ACT_NONE,
36 WORK_ACT_TIMEOUT,
37 WORK_ACT_DONE,
38};
39
40
41/* utils */
42static inline void ASSERT_WORK_MTX(struct ieee80211_local *local)
43{
44 WARN_ON(!mutex_is_locked(&local->work_mtx));
45}
46
47/*
48 * We can have multiple work items (and connection probing)
49 * scheduling this timer, but we need to take care to only
50 * reschedule it when it should fire _earlier_ than it was
51 * asked for before, or if it's not pending right now. This
52 * function ensures that. Note that it then is required to
53 * run this function for all timeouts after the first one
54 * has happened -- the work that runs from this timer will
55 * do that.
56 */
57static void run_again(struct ieee80211_local *local,
58 unsigned long timeout)
59{
60 ASSERT_WORK_MTX(local);
61
62 if (!timer_pending(&local->work_timer) ||
63 time_before(timeout, local->work_timer.expires))
64 mod_timer(&local->work_timer, timeout);
65}
66
67static void work_free_rcu(struct rcu_head *head)
68{
69 struct ieee80211_work *wk =
70 container_of(head, struct ieee80211_work, rcu_head);
71
72 kfree(wk);
73}
74
75void free_work(struct ieee80211_work *wk)
76{
77 call_rcu(&wk->rcu_head, work_free_rcu);
78}
79
80static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
81 struct ieee80211_supported_band *sband,
82 u32 *rates)
83{
84 int i, j, count;
85 *rates = 0;
86 count = 0;
87 for (i = 0; i < supp_rates_len; i++) {
88 int rate = (supp_rates[i] & 0x7F) * 5;
89
90 for (j = 0; j < sband->n_bitrates; j++)
91 if (sband->bitrates[j].bitrate == rate) {
92 *rates |= BIT(j);
93 count++;
94 break;
95 }
96 }
97
98 return count;
99}
100
101/* frame sending functions */
102
103static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
104 struct ieee80211_supported_band *sband,
105 struct ieee80211_channel *channel,
106 enum ieee80211_smps_mode smps)
107{
108 struct ieee80211_ht_info *ht_info;
109 u8 *pos;
110 u32 flags = channel->flags;
111 u16 cap = sband->ht_cap.cap;
112 __le16 tmp;
113
114 if (!sband->ht_cap.ht_supported)
115 return;
116
117 if (!ht_info_ie)
118 return;
119
120 if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info))
121 return;
122
123 ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2);
124
125 /* determine capability flags */
126
127 if (ieee80211_disable_40mhz_24ghz &&
128 sband->band == IEEE80211_BAND_2GHZ) {
129 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
130 cap &= ~IEEE80211_HT_CAP_SGI_40;
131 }
132
133 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
134 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
135 if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
136 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
137 cap &= ~IEEE80211_HT_CAP_SGI_40;
138 }
139 break;
140 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
141 if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
142 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
143 cap &= ~IEEE80211_HT_CAP_SGI_40;
144 }
145 break;
146 }
147
148 /* set SM PS mode properly */
149 cap &= ~IEEE80211_HT_CAP_SM_PS;
150 switch (smps) {
151 case IEEE80211_SMPS_AUTOMATIC:
152 case IEEE80211_SMPS_NUM_MODES:
153 WARN_ON(1);
154 case IEEE80211_SMPS_OFF:
155 cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
156 IEEE80211_HT_CAP_SM_PS_SHIFT;
157 break;
158 case IEEE80211_SMPS_STATIC:
159 cap |= WLAN_HT_CAP_SM_PS_STATIC <<
160 IEEE80211_HT_CAP_SM_PS_SHIFT;
161 break;
162 case IEEE80211_SMPS_DYNAMIC:
163 cap |= WLAN_HT_CAP_SM_PS_DYNAMIC <<
164 IEEE80211_HT_CAP_SM_PS_SHIFT;
165 break;
166 }
167
168 /* reserve and fill IE */
169
170 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
171 *pos++ = WLAN_EID_HT_CAPABILITY;
172 *pos++ = sizeof(struct ieee80211_ht_cap);
173 memset(pos, 0, sizeof(struct ieee80211_ht_cap));
174
175 /* capability flags */
176 tmp = cpu_to_le16(cap);
177 memcpy(pos, &tmp, sizeof(u16));
178 pos += sizeof(u16);
179
180 /* AMPDU parameters */
181 *pos++ = sband->ht_cap.ampdu_factor |
182 (sband->ht_cap.ampdu_density <<
183 IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
184
185 /* MCS set */
186 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
187 pos += sizeof(sband->ht_cap.mcs);
188
189 /* extended capabilities */
190 pos += sizeof(__le16);
191
192 /* BF capabilities */
193 pos += sizeof(__le32);
194
195 /* antenna selection */
196 pos += sizeof(u8);
197}
198
199static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
200 struct ieee80211_work *wk)
201{
202 struct ieee80211_local *local = sdata->local;
203 struct sk_buff *skb;
204 struct ieee80211_mgmt *mgmt;
205 u8 *pos, qos_info;
206 const u8 *ies;
207 size_t offset = 0, noffset;
208 int i, len, count, rates_len, supp_rates_len;
209 u16 capab;
210 struct ieee80211_supported_band *sband;
211 u32 rates = 0;
212
213 sband = local->hw.wiphy->bands[wk->chan->band];
214
215 /*
216 * Get all rates supported by the device and the AP as
217 * some APs don't like getting a superset of their rates
218 * in the association request (e.g. D-Link DAP 1353 in
219 * b-only mode)...
220 */
221 rates_len = ieee80211_compatible_rates(wk->assoc.supp_rates,
222 wk->assoc.supp_rates_len,
223 sband, &rates);
224
225 skb = alloc_skb(local->hw.extra_tx_headroom +
226 sizeof(*mgmt) + /* bit too much but doesn't matter */
227 2 + wk->assoc.ssid_len + /* SSID */
228 4 + rates_len + /* (extended) rates */
229 4 + /* power capability */
230 2 + 2 * sband->n_channels + /* supported channels */
231 2 + sizeof(struct ieee80211_ht_cap) + /* HT */
232 wk->ie_len + /* extra IEs */
233 9, /* WMM */
234 GFP_KERNEL);
235 if (!skb) {
236 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
237 "frame\n", sdata->name);
238 return;
239 }
240 skb_reserve(skb, local->hw.extra_tx_headroom);
241
242 capab = WLAN_CAPABILITY_ESS;
243
244 if (sband->band == IEEE80211_BAND_2GHZ) {
245 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
246 capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
247 if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
248 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
249 }
250
251 if (wk->assoc.capability & WLAN_CAPABILITY_PRIVACY)
252 capab |= WLAN_CAPABILITY_PRIVACY;
253
254 if ((wk->assoc.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
255 (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
256 capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
257
258 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
259 memset(mgmt, 0, 24);
260 memcpy(mgmt->da, wk->filter_ta, ETH_ALEN);
261 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
262 memcpy(mgmt->bssid, wk->filter_ta, ETH_ALEN);
263
264 if (!is_zero_ether_addr(wk->assoc.prev_bssid)) {
265 skb_put(skb, 10);
266 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
267 IEEE80211_STYPE_REASSOC_REQ);
268 mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
269 mgmt->u.reassoc_req.listen_interval =
270 cpu_to_le16(local->hw.conf.listen_interval);
271 memcpy(mgmt->u.reassoc_req.current_ap, wk->assoc.prev_bssid,
272 ETH_ALEN);
273 } else {
274 skb_put(skb, 4);
275 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
276 IEEE80211_STYPE_ASSOC_REQ);
277 mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
278 mgmt->u.assoc_req.listen_interval =
279 cpu_to_le16(local->hw.conf.listen_interval);
280 }
281
282 /* SSID */
283 ies = pos = skb_put(skb, 2 + wk->assoc.ssid_len);
284 *pos++ = WLAN_EID_SSID;
285 *pos++ = wk->assoc.ssid_len;
286 memcpy(pos, wk->assoc.ssid, wk->assoc.ssid_len);
287
288 /* add all rates which were marked to be used above */
289 supp_rates_len = rates_len;
290 if (supp_rates_len > 8)
291 supp_rates_len = 8;
292
293 len = sband->n_bitrates;
294 pos = skb_put(skb, supp_rates_len + 2);
295 *pos++ = WLAN_EID_SUPP_RATES;
296 *pos++ = supp_rates_len;
297
298 count = 0;
299 for (i = 0; i < sband->n_bitrates; i++) {
300 if (BIT(i) & rates) {
301 int rate = sband->bitrates[i].bitrate;
302 *pos++ = (u8) (rate / 5);
303 if (++count == 8)
304 break;
305 }
306 }
307
308 if (rates_len > count) {
309 pos = skb_put(skb, rates_len - count + 2);
310 *pos++ = WLAN_EID_EXT_SUPP_RATES;
311 *pos++ = rates_len - count;
312
313 for (i++; i < sband->n_bitrates; i++) {
314 if (BIT(i) & rates) {
315 int rate = sband->bitrates[i].bitrate;
316 *pos++ = (u8) (rate / 5);
317 }
318 }
319 }
320
321 if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
322 /* 1. power capabilities */
323 pos = skb_put(skb, 4);
324 *pos++ = WLAN_EID_PWR_CAPABILITY;
325 *pos++ = 2;
326 *pos++ = 0; /* min tx power */
327 *pos++ = wk->chan->max_power; /* max tx power */
328
329 /* 2. supported channels */
330 /* TODO: get this in reg domain format */
331 pos = skb_put(skb, 2 * sband->n_channels + 2);
332 *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
333 *pos++ = 2 * sband->n_channels;
334 for (i = 0; i < sband->n_channels; i++) {
335 *pos++ = ieee80211_frequency_to_channel(
336 sband->channels[i].center_freq);
337 *pos++ = 1; /* one channel in the subband*/
338 }
339 }
340
341 /* if present, add any custom IEs that go before HT */
342 if (wk->ie_len && wk->ie) {
343 static const u8 before_ht[] = {
344 WLAN_EID_SSID,
345 WLAN_EID_SUPP_RATES,
346 WLAN_EID_EXT_SUPP_RATES,
347 WLAN_EID_PWR_CAPABILITY,
348 WLAN_EID_SUPPORTED_CHANNELS,
349 WLAN_EID_RSN,
350 WLAN_EID_QOS_CAPA,
351 WLAN_EID_RRM_ENABLED_CAPABILITIES,
352 WLAN_EID_MOBILITY_DOMAIN,
353 WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
354 };
355 noffset = ieee80211_ie_split(wk->ie, wk->ie_len,
356 before_ht, ARRAY_SIZE(before_ht),
357 offset);
358 pos = skb_put(skb, noffset - offset);
359 memcpy(pos, wk->ie + offset, noffset - offset);
360 offset = noffset;
361 }
362
363 if (wk->assoc.use_11n && wk->assoc.wmm_used &&
364 local->hw.queues >= 4)
365 ieee80211_add_ht_ie(skb, wk->assoc.ht_information_ie,
366 sband, wk->chan, wk->assoc.smps);
367
368 /* if present, add any custom non-vendor IEs that go after HT */
369 if (wk->ie_len && wk->ie) {
370 noffset = ieee80211_ie_split_vendor(wk->ie, wk->ie_len,
371 offset);
372 pos = skb_put(skb, noffset - offset);
373 memcpy(pos, wk->ie + offset, noffset - offset);
374 offset = noffset;
375 }
376
377 if (wk->assoc.wmm_used && local->hw.queues >= 4) {
378 if (wk->assoc.uapsd_used) {
379 qos_info = local->uapsd_queues;
380 qos_info |= (local->uapsd_max_sp_len <<
381 IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT);
382 } else {
383 qos_info = 0;
384 }
385
386 pos = skb_put(skb, 9);
387 *pos++ = WLAN_EID_VENDOR_SPECIFIC;
388 *pos++ = 7; /* len */
389 *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
390 *pos++ = 0x50;
391 *pos++ = 0xf2;
392 *pos++ = 2; /* WME */
393 *pos++ = 0; /* WME info */
394 *pos++ = 1; /* WME ver */
395 *pos++ = qos_info;
396 }
397
398 /* add any remaining custom (i.e. vendor specific here) IEs */
399 if (wk->ie_len && wk->ie) {
400 noffset = wk->ie_len;
401 pos = skb_put(skb, noffset - offset);
402 memcpy(pos, wk->ie + offset, noffset - offset);
403 }
404
405 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
406 ieee80211_tx_skb(sdata, skb);
407}
408
409static void ieee80211_remove_auth_bss(struct ieee80211_local *local,
410 struct ieee80211_work *wk)
411{
412 struct cfg80211_bss *cbss;
413 u16 capa_val = WLAN_CAPABILITY_ESS;
414
415 if (wk->probe_auth.privacy)
416 capa_val |= WLAN_CAPABILITY_PRIVACY;
417
418 cbss = cfg80211_get_bss(local->hw.wiphy, wk->chan, wk->filter_ta,
419 wk->probe_auth.ssid, wk->probe_auth.ssid_len,
420 WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY,
421 capa_val);
422 if (!cbss)
423 return;
424
425 cfg80211_unlink_bss(local->hw.wiphy, cbss);
426 cfg80211_put_bss(cbss);
427}
428
429static enum work_action __must_check
430ieee80211_direct_probe(struct ieee80211_work *wk)
431{
432 struct ieee80211_sub_if_data *sdata = wk->sdata;
433 struct ieee80211_local *local = sdata->local;
434
435 wk->probe_auth.tries++;
436 if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
437 printk(KERN_DEBUG "%s: direct probe to %pM timed out\n",
438 sdata->name, wk->filter_ta);
439
440 /*
441 * Most likely AP is not in the range so remove the
442 * bss struct for that AP.
443 */
444 ieee80211_remove_auth_bss(local, wk);
445
446 return WORK_ACT_TIMEOUT;
447 }
448
449 printk(KERN_DEBUG "%s: direct probe to %pM (try %d)\n",
450 sdata->name, wk->filter_ta, wk->probe_auth.tries);
451
452 /*
453 * Direct probe is sent to broadcast address as some APs
454 * will not answer to direct packet in unassociated state.
455 */
456 ieee80211_send_probe_req(sdata, NULL, wk->probe_auth.ssid,
457 wk->probe_auth.ssid_len, NULL, 0);
458
459 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
460 run_again(local, wk->timeout);
461
462 return WORK_ACT_NONE;
463}
464
465
466static enum work_action __must_check
467ieee80211_authenticate(struct ieee80211_work *wk)
468{
469 struct ieee80211_sub_if_data *sdata = wk->sdata;
470 struct ieee80211_local *local = sdata->local;
471
472 wk->probe_auth.tries++;
473 if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
474 printk(KERN_DEBUG "%s: authentication with %pM"
475 " timed out\n", sdata->name, wk->filter_ta);
476
477 /*
478 * Most likely AP is not in the range so remove the
479 * bss struct for that AP.
480 */
481 ieee80211_remove_auth_bss(local, wk);
482
483 return WORK_ACT_TIMEOUT;
484 }
485
486 printk(KERN_DEBUG "%s: authenticate with %pM (try %d)\n",
487 sdata->name, wk->filter_ta, wk->probe_auth.tries);
488
489 ieee80211_send_auth(sdata, 1, wk->probe_auth.algorithm, wk->ie,
490 wk->ie_len, wk->filter_ta, NULL, 0, 0);
491 wk->probe_auth.transaction = 2;
492
493 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
494 run_again(local, wk->timeout);
495
496 return WORK_ACT_NONE;
497}
498
499static enum work_action __must_check
500ieee80211_associate(struct ieee80211_work *wk)
501{
502 struct ieee80211_sub_if_data *sdata = wk->sdata;
503 struct ieee80211_local *local = sdata->local;
504
505 wk->assoc.tries++;
506 if (wk->assoc.tries > IEEE80211_ASSOC_MAX_TRIES) {
507 printk(KERN_DEBUG "%s: association with %pM"
508 " timed out\n",
509 sdata->name, wk->filter_ta);
510
511 /*
512 * Most likely AP is not in the range so remove the
513 * bss struct for that AP.
514 */
515 if (wk->assoc.bss)
516 cfg80211_unlink_bss(local->hw.wiphy, wk->assoc.bss);
517
518 return WORK_ACT_TIMEOUT;
519 }
520
521 printk(KERN_DEBUG "%s: associate with %pM (try %d)\n",
522 sdata->name, wk->filter_ta, wk->assoc.tries);
523 ieee80211_send_assoc(sdata, wk);
524
525 wk->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
526 run_again(local, wk->timeout);
527
528 return WORK_ACT_NONE;
529}
530
531static enum work_action __must_check
532ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk)
533{
534 /*
535 * First time we run, do nothing -- the generic code will
536 * have switched to the right channel etc.
537 */
538 if (!wk->started) {
539 wk->timeout = jiffies + msecs_to_jiffies(wk->remain.duration);
540
541 cfg80211_ready_on_channel(wk->sdata->dev, (unsigned long) wk,
542 wk->chan, wk->chan_type,
543 wk->remain.duration, GFP_KERNEL);
544
545 return WORK_ACT_NONE;
546 }
547
548 return WORK_ACT_TIMEOUT;
549}
550
551static void ieee80211_auth_challenge(struct ieee80211_work *wk,
552 struct ieee80211_mgmt *mgmt,
553 size_t len)
554{
555 struct ieee80211_sub_if_data *sdata = wk->sdata;
556 u8 *pos;
557 struct ieee802_11_elems elems;
558
559 pos = mgmt->u.auth.variable;
560 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
561 if (!elems.challenge)
562 return;
563 ieee80211_send_auth(sdata, 3, wk->probe_auth.algorithm,
564 elems.challenge - 2, elems.challenge_len + 2,
565 wk->filter_ta, wk->probe_auth.key,
566 wk->probe_auth.key_len, wk->probe_auth.key_idx);
567 wk->probe_auth.transaction = 4;
568}
569
570static enum work_action __must_check
571ieee80211_rx_mgmt_auth(struct ieee80211_work *wk,
572 struct ieee80211_mgmt *mgmt, size_t len)
573{
574 u16 auth_alg, auth_transaction, status_code;
575
576 if (wk->type != IEEE80211_WORK_AUTH)
577 return WORK_ACT_NONE;
578
579 if (len < 24 + 6)
580 return WORK_ACT_NONE;
581
582 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
583 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
584 status_code = le16_to_cpu(mgmt->u.auth.status_code);
585
586 if (auth_alg != wk->probe_auth.algorithm ||
587 auth_transaction != wk->probe_auth.transaction)
588 return WORK_ACT_NONE;
589
590 if (status_code != WLAN_STATUS_SUCCESS) {
591 printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n",
592 wk->sdata->name, mgmt->sa, status_code);
593 return WORK_ACT_DONE;
594 }
595
596 switch (wk->probe_auth.algorithm) {
597 case WLAN_AUTH_OPEN:
598 case WLAN_AUTH_LEAP:
599 case WLAN_AUTH_FT:
600 break;
601 case WLAN_AUTH_SHARED_KEY:
602 if (wk->probe_auth.transaction != 4) {
603 ieee80211_auth_challenge(wk, mgmt, len);
604 /* need another frame */
605 return WORK_ACT_NONE;
606 }
607 break;
608 default:
609 WARN_ON(1);
610 return WORK_ACT_NONE;
611 }
612
613 printk(KERN_DEBUG "%s: authenticated\n", wk->sdata->name);
614 return WORK_ACT_DONE;
615}
616
617static enum work_action __must_check
618ieee80211_rx_mgmt_assoc_resp(struct ieee80211_work *wk,
619 struct ieee80211_mgmt *mgmt, size_t len,
620 bool reassoc)
621{
622 struct ieee80211_sub_if_data *sdata = wk->sdata;
623 struct ieee80211_local *local = sdata->local;
624 u16 capab_info, status_code, aid;
625 struct ieee802_11_elems elems;
626 u8 *pos;
627
628 /*
629 * AssocResp and ReassocResp have identical structure, so process both
630 * of them in this function.
631 */
632
633 if (len < 24 + 6)
634 return WORK_ACT_NONE;
635
636 capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
637 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
638 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
639
640 printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
641 "status=%d aid=%d)\n",
642 sdata->name, reassoc ? "Rea" : "A", mgmt->sa,
643 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
644
645 pos = mgmt->u.assoc_resp.variable;
646 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
647
648 if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
649 elems.timeout_int && elems.timeout_int_len == 5 &&
650 elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) {
651 u32 tu, ms;
652 tu = get_unaligned_le32(elems.timeout_int + 1);
653 ms = tu * 1024 / 1000;
654 printk(KERN_DEBUG "%s: %pM rejected association temporarily; "
655 "comeback duration %u TU (%u ms)\n",
656 sdata->name, mgmt->sa, tu, ms);
657 wk->timeout = jiffies + msecs_to_jiffies(ms);
658 if (ms > IEEE80211_ASSOC_TIMEOUT)
659 run_again(local, wk->timeout);
660 return WORK_ACT_NONE;
661 }
662
663 if (status_code != WLAN_STATUS_SUCCESS)
664 printk(KERN_DEBUG "%s: %pM denied association (code=%d)\n",
665 sdata->name, mgmt->sa, status_code);
666 else
667 printk(KERN_DEBUG "%s: associated\n", sdata->name);
668
669 return WORK_ACT_DONE;
670}
671
672static enum work_action __must_check
673ieee80211_rx_mgmt_probe_resp(struct ieee80211_work *wk,
674 struct ieee80211_mgmt *mgmt, size_t len,
675 struct ieee80211_rx_status *rx_status)
676{
677 struct ieee80211_sub_if_data *sdata = wk->sdata;
678 struct ieee80211_local *local = sdata->local;
679 size_t baselen;
680
681 ASSERT_WORK_MTX(local);
682
683 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
684 if (baselen > len)
685 return WORK_ACT_NONE;
686
687 printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name);
688 return WORK_ACT_DONE;
689}
690
691static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
692 struct sk_buff *skb)
693{
694 struct ieee80211_rx_status *rx_status;
695 struct ieee80211_mgmt *mgmt;
696 struct ieee80211_work *wk;
697 enum work_action rma = WORK_ACT_NONE;
698 u16 fc;
699
700 rx_status = (struct ieee80211_rx_status *) skb->cb;
701 mgmt = (struct ieee80211_mgmt *) skb->data;
702 fc = le16_to_cpu(mgmt->frame_control);
703
704 mutex_lock(&local->work_mtx);
705
706 list_for_each_entry(wk, &local->work_list, list) {
707 const u8 *bssid = NULL;
708
709 switch (wk->type) {
710 case IEEE80211_WORK_DIRECT_PROBE:
711 case IEEE80211_WORK_AUTH:
712 case IEEE80211_WORK_ASSOC:
713 bssid = wk->filter_ta;
714 break;
715 default:
716 continue;
717 }
718
719 /*
720 * Before queuing, we already verified mgmt->sa,
721 * so this is needed just for matching.
722 */
723 if (compare_ether_addr(bssid, mgmt->bssid))
724 continue;
725
726 switch (fc & IEEE80211_FCTL_STYPE) {
727 case IEEE80211_STYPE_PROBE_RESP:
728 rma = ieee80211_rx_mgmt_probe_resp(wk, mgmt, skb->len,
729 rx_status);
730 break;
731 case IEEE80211_STYPE_AUTH:
732 rma = ieee80211_rx_mgmt_auth(wk, mgmt, skb->len);
733 break;
734 case IEEE80211_STYPE_ASSOC_RESP:
735 rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt,
736 skb->len, false);
737 break;
738 case IEEE80211_STYPE_REASSOC_RESP:
739 rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt,
740 skb->len, true);
741 break;
742 default:
743 WARN_ON(1);
744 }
745 /*
746 * We've processed this frame for that work, so it can't
747 * belong to another work struct.
748 * NB: this is also required for correctness for 'rma'!
749 */
750 break;
751 }
752
753 switch (rma) {
754 case WORK_ACT_NONE:
755 break;
756 case WORK_ACT_DONE:
757 list_del_rcu(&wk->list);
758 break;
759 default:
760 WARN(1, "unexpected: %d", rma);
761 }
762
763 mutex_unlock(&local->work_mtx);
764
765 if (rma != WORK_ACT_DONE)
766 goto out;
767
768 switch (wk->done(wk, skb)) {
769 case WORK_DONE_DESTROY:
770 free_work(wk);
771 break;
772 case WORK_DONE_REQUEUE:
773 synchronize_rcu();
774 wk->started = false; /* restart */
775 mutex_lock(&local->work_mtx);
776 list_add_tail(&wk->list, &local->work_list);
777 mutex_unlock(&local->work_mtx);
778 }
779
780 out:
781 kfree_skb(skb);
782}
783
784static void ieee80211_work_timer(unsigned long data)
785{
786 struct ieee80211_local *local = (void *) data;
787
788 if (local->quiescing)
789 return;
790
791 ieee80211_queue_work(&local->hw, &local->work_work);
792}
793
794static void ieee80211_work_work(struct work_struct *work)
795{
796 struct ieee80211_local *local =
797 container_of(work, struct ieee80211_local, work_work);
798 struct sk_buff *skb;
799 struct ieee80211_work *wk, *tmp;
800 LIST_HEAD(free_work);
801 enum work_action rma;
802 bool remain_off_channel = false;
803
804 if (local->scanning)
805 return;
806
807 /*
808 * ieee80211_queue_work() should have picked up most cases,
809 * here we'll pick the the rest.
810 */
811 if (WARN(local->suspended, "work scheduled while going to suspend\n"))
812 return;
813
814 /* first process frames to avoid timing out while a frame is pending */
815 while ((skb = skb_dequeue(&local->work_skb_queue)))
816 ieee80211_work_rx_queued_mgmt(local, skb);
817
818 ieee80211_recalc_idle(local);
819
820 mutex_lock(&local->work_mtx);
821
822 list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
823 bool started = wk->started;
824
825 /* mark work as started if it's on the current off-channel */
826 if (!started && local->tmp_channel &&
827 wk->chan == local->tmp_channel &&
828 wk->chan_type == local->tmp_channel_type) {
829 started = true;
830 wk->timeout = jiffies;
831 }
832
833 if (!started && !local->tmp_channel) {
834 /*
835 * TODO: could optimize this by leaving the
836 * station vifs in awake mode if they
837 * happen to be on the same channel as
838 * the requested channel
839 */
840 ieee80211_offchannel_stop_beaconing(local);
841 ieee80211_offchannel_stop_station(local);
842
843 local->tmp_channel = wk->chan;
844 local->tmp_channel_type = wk->chan_type;
845 ieee80211_hw_config(local, 0);
846 started = true;
847 wk->timeout = jiffies;
848 }
849
850 /* don't try to work with items that aren't started */
851 if (!started)
852 continue;
853
854 if (time_is_after_jiffies(wk->timeout)) {
855 /*
856 * This work item isn't supposed to be worked on
857 * right now, but take care to adjust the timer
858 * properly.
859 */
860 run_again(local, wk->timeout);
861 continue;
862 }
863
864 switch (wk->type) {
865 default:
866 WARN_ON(1);
867 /* nothing */
868 rma = WORK_ACT_NONE;
869 break;
870 case IEEE80211_WORK_ABORT:
871 rma = WORK_ACT_TIMEOUT;
872 case IEEE80211_WORK_DIRECT_PROBE:
873 rma = ieee80211_direct_probe(wk);
874 break;
875 case IEEE80211_WORK_AUTH:
876 rma = ieee80211_authenticate(wk);
877 break;
878 case IEEE80211_WORK_ASSOC:
879 rma = ieee80211_associate(wk);
880 break;
881 case IEEE80211_WORK_REMAIN_ON_CHANNEL:
882 rma = ieee80211_remain_on_channel_timeout(wk);
883 break;
884 }
885
886 wk->started = started;
887
888 switch (rma) {
889 case WORK_ACT_NONE:
890 /* might have changed the timeout */
891 run_again(local, wk->timeout);
892 break;
893 case WORK_ACT_TIMEOUT:
894 list_del_rcu(&wk->list);
895 synchronize_rcu();
896 list_add(&wk->list, &free_work);
897 break;
898 default:
899 WARN(1, "unexpected: %d", rma);
900 }
901 }
902
903 list_for_each_entry(wk, &local->work_list, list) {
904 if (!wk->started)
905 continue;
906 if (wk->chan != local->tmp_channel)
907 continue;
908 if (wk->chan_type != local->tmp_channel_type)
909 continue;
910 remain_off_channel = true;
911 }
912
913 if (!remain_off_channel && local->tmp_channel) {
914 local->tmp_channel = NULL;
915 ieee80211_hw_config(local, 0);
916 ieee80211_offchannel_return(local, true);
917 /* give connection some time to breathe */
918 run_again(local, jiffies + HZ/2);
919 }
920
921 if (list_empty(&local->work_list) && local->scan_req)
922 ieee80211_queue_delayed_work(&local->hw,
923 &local->scan_work,
924 round_jiffies_relative(0));
925
926 mutex_unlock(&local->work_mtx);
927
928 ieee80211_recalc_idle(local);
929
930 list_for_each_entry_safe(wk, tmp, &free_work, list) {
931 wk->done(wk, NULL);
932 list_del(&wk->list);
933 kfree(wk);
934 }
935}
936
937void ieee80211_add_work(struct ieee80211_work *wk)
938{
939 struct ieee80211_local *local;
940
941 if (WARN_ON(!wk->chan))
942 return;
943
944 if (WARN_ON(!wk->sdata))
945 return;
946
947 if (WARN_ON(!wk->done))
948 return;
949
950 if (WARN_ON(!ieee80211_sdata_running(wk->sdata)))
951 return;
952
953 wk->started = false;
954
955 local = wk->sdata->local;
956 mutex_lock(&local->work_mtx);
957 list_add_tail(&wk->list, &local->work_list);
958 mutex_unlock(&local->work_mtx);
959
960 ieee80211_queue_work(&local->hw, &local->work_work);
961}
962
963void ieee80211_work_init(struct ieee80211_local *local)
964{
965 mutex_init(&local->work_mtx);
966 INIT_LIST_HEAD(&local->work_list);
967 setup_timer(&local->work_timer, ieee80211_work_timer,
968 (unsigned long)local);
969 INIT_WORK(&local->work_work, ieee80211_work_work);
970 skb_queue_head_init(&local->work_skb_queue);
971}
972
973void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
974{
975 struct ieee80211_local *local = sdata->local;
976 struct ieee80211_work *wk;
977
978 mutex_lock(&local->work_mtx);
979 list_for_each_entry(wk, &local->work_list, list) {
980 if (wk->sdata != sdata)
981 continue;
982 wk->type = IEEE80211_WORK_ABORT;
983 wk->started = true;
984 wk->timeout = jiffies;
985 }
986 mutex_unlock(&local->work_mtx);
987
988 /* run cleanups etc. */
989 ieee80211_work_work(&local->work_work);
990
991 mutex_lock(&local->work_mtx);
992 list_for_each_entry(wk, &local->work_list, list) {
993 if (wk->sdata != sdata)
994 continue;
995 WARN_ON(1);
996 break;
997 }
998 mutex_unlock(&local->work_mtx);
999}
1000
1001ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
1002 struct sk_buff *skb)
1003{
1004 struct ieee80211_local *local = sdata->local;
1005 struct ieee80211_mgmt *mgmt;
1006 struct ieee80211_work *wk;
1007 u16 fc;
1008
1009 if (skb->len < 24)
1010 return RX_DROP_MONITOR;
1011
1012 mgmt = (struct ieee80211_mgmt *) skb->data;
1013 fc = le16_to_cpu(mgmt->frame_control);
1014
1015 list_for_each_entry_rcu(wk, &local->work_list, list) {
1016 if (sdata != wk->sdata)
1017 continue;
1018 if (compare_ether_addr(wk->filter_ta, mgmt->sa))
1019 continue;
1020 if (compare_ether_addr(wk->filter_ta, mgmt->bssid))
1021 continue;
1022
1023 switch (fc & IEEE80211_FCTL_STYPE) {
1024 case IEEE80211_STYPE_AUTH:
1025 case IEEE80211_STYPE_PROBE_RESP:
1026 case IEEE80211_STYPE_ASSOC_RESP:
1027 case IEEE80211_STYPE_REASSOC_RESP:
1028 skb_queue_tail(&local->work_skb_queue, skb);
1029 ieee80211_queue_work(&local->hw, &local->work_work);
1030 return RX_QUEUED;
1031 }
1032 }
1033
1034 return RX_CONTINUE;
1035}
1036
1037static enum work_done_result ieee80211_remain_done(struct ieee80211_work *wk,
1038 struct sk_buff *skb)
1039{
1040 /*
1041 * We are done serving the remain-on-channel command.
1042 */
1043 cfg80211_remain_on_channel_expired(wk->sdata->dev, (unsigned long) wk,
1044 wk->chan, wk->chan_type,
1045 GFP_KERNEL);
1046
1047 return WORK_DONE_DESTROY;
1048}
1049
1050int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
1051 struct ieee80211_channel *chan,
1052 enum nl80211_channel_type channel_type,
1053 unsigned int duration, u64 *cookie)
1054{
1055 struct ieee80211_work *wk;
1056
1057 wk = kzalloc(sizeof(*wk), GFP_KERNEL);
1058 if (!wk)
1059 return -ENOMEM;
1060
1061 wk->type = IEEE80211_WORK_REMAIN_ON_CHANNEL;
1062 wk->chan = chan;
1063 wk->chan_type = channel_type;
1064 wk->sdata = sdata;
1065 wk->done = ieee80211_remain_done;
1066
1067 wk->remain.duration = duration;
1068
1069 *cookie = (unsigned long) wk;
1070
1071 ieee80211_add_work(wk);
1072
1073 return 0;
1074}
1075
1076int ieee80211_wk_cancel_remain_on_channel(struct ieee80211_sub_if_data *sdata,
1077 u64 cookie)
1078{
1079 struct ieee80211_local *local = sdata->local;
1080 struct ieee80211_work *wk, *tmp;
1081 bool found = false;
1082
1083 mutex_lock(&local->work_mtx);
1084 list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
1085 if ((unsigned long) wk == cookie) {
1086 wk->timeout = jiffies;
1087 found = true;
1088 break;
1089 }
1090 }
1091 mutex_unlock(&local->work_mtx);
1092
1093 if (!found)
1094 return -ENOENT;
1095
1096 ieee80211_queue_work(&local->hw, &local->work_work);
1097
1098 return 0;
1099}
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 5332014cb22..f4971cd45c6 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -31,8 +31,8 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
31 unsigned int hdrlen; 31 unsigned int hdrlen;
32 struct ieee80211_hdr *hdr; 32 struct ieee80211_hdr *hdr;
33 struct sk_buff *skb = tx->skb; 33 struct sk_buff *skb = tx->skb;
34 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
34 int authenticator; 35 int authenticator;
35 int wpa_test = 0;
36 int tail; 36 int tail;
37 37
38 hdr = (struct ieee80211_hdr *)skb->data; 38 hdr = (struct ieee80211_hdr *)skb->data;
@@ -47,16 +47,15 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
47 data = skb->data + hdrlen; 47 data = skb->data + hdrlen;
48 data_len = skb->len - hdrlen; 48 data_len = skb->len - hdrlen;
49 49
50 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && 50 if (info->control.hw_key &&
51 !(tx->flags & IEEE80211_TX_FRAGMENTED) && 51 !(tx->flags & IEEE80211_TX_FRAGMENTED) &&
52 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) && 52 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
53 !wpa_test) { 53 /* hwaccel - with no need for SW-generated MMIC */
54 /* hwaccel - with no need for preallocated room for MMIC */
55 return TX_CONTINUE; 54 return TX_CONTINUE;
56 } 55 }
57 56
58 tail = MICHAEL_MIC_LEN; 57 tail = MICHAEL_MIC_LEN;
59 if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) 58 if (!info->control.hw_key)
60 tail += TKIP_ICV_LEN; 59 tail += TKIP_ICV_LEN;
61 60
62 if (WARN_ON(skb_tailroom(skb) < tail || 61 if (WARN_ON(skb_tailroom(skb) < tail ||
@@ -147,17 +146,16 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
147 int len, tail; 146 int len, tail;
148 u8 *pos; 147 u8 *pos;
149 148
150 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && 149 if (info->control.hw_key &&
151 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { 150 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
152 /* hwaccel - with no need for preallocated room for IV/ICV */ 151 /* hwaccel - with no need for software-generated IV */
153 info->control.hw_key = &tx->key->conf;
154 return 0; 152 return 0;
155 } 153 }
156 154
157 hdrlen = ieee80211_hdrlen(hdr->frame_control); 155 hdrlen = ieee80211_hdrlen(hdr->frame_control);
158 len = skb->len - hdrlen; 156 len = skb->len - hdrlen;
159 157
160 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) 158 if (info->control.hw_key)
161 tail = 0; 159 tail = 0;
162 else 160 else
163 tail = TKIP_ICV_LEN; 161 tail = TKIP_ICV_LEN;
@@ -175,13 +173,11 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
175 if (key->u.tkip.tx.iv16 == 0) 173 if (key->u.tkip.tx.iv16 == 0)
176 key->u.tkip.tx.iv32++; 174 key->u.tkip.tx.iv32++;
177 175
178 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 176 pos = ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16);
179 /* hwaccel - with preallocated room for IV */
180 ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16);
181 177
182 info->control.hw_key = &tx->key->conf; 178 /* hwaccel - with software IV */
179 if (info->control.hw_key)
183 return 0; 180 return 0;
184 }
185 181
186 /* Add room for ICV */ 182 /* Add room for ICV */
187 skb_put(skb, TKIP_ICV_LEN); 183 skb_put(skb, TKIP_ICV_LEN);
@@ -363,24 +359,20 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
363 int hdrlen, len, tail; 359 int hdrlen, len, tail;
364 u8 *pos, *pn; 360 u8 *pos, *pn;
365 int i; 361 int i;
366 bool skip_hw;
367
368 skip_hw = (tx->key->conf.flags & IEEE80211_KEY_FLAG_SW_MGMT) &&
369 ieee80211_is_mgmt(hdr->frame_control);
370 362
371 if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && 363 if (info->control.hw_key &&
372 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) && 364 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
373 !skip_hw) { 365 /*
374 /* hwaccel - with no need for preallocated room for CCMP 366 * hwaccel has no need for preallocated room for CCMP
375 * header or MIC fields */ 367 * header or MIC fields
376 info->control.hw_key = &tx->key->conf; 368 */
377 return 0; 369 return 0;
378 } 370 }
379 371
380 hdrlen = ieee80211_hdrlen(hdr->frame_control); 372 hdrlen = ieee80211_hdrlen(hdr->frame_control);
381 len = skb->len - hdrlen; 373 len = skb->len - hdrlen;
382 374
383 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) 375 if (info->control.hw_key)
384 tail = 0; 376 tail = 0;
385 else 377 else
386 tail = CCMP_MIC_LEN; 378 tail = CCMP_MIC_LEN;
@@ -405,11 +397,9 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
405 397
406 ccmp_pn2hdr(pos, pn, key->conf.keyidx); 398 ccmp_pn2hdr(pos, pn, key->conf.keyidx);
407 399
408 if ((key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && !skip_hw) { 400 /* hwaccel - with software CCMP header */
409 /* hwaccel - with preallocated room for CCMP header */ 401 if (info->control.hw_key)
410 info->control.hw_key = &tx->key->conf;
411 return 0; 402 return 0;
412 }
413 403
414 pos += CCMP_HDR_LEN; 404 pos += CCMP_HDR_LEN;
415 ccmp_special_blocks(skb, pn, key->u.ccmp.tx_crypto_buf, 0); 405 ccmp_special_blocks(skb, pn, key->u.ccmp.tx_crypto_buf, 0);
@@ -525,11 +515,8 @@ ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
525 u8 *pn, aad[20]; 515 u8 *pn, aad[20];
526 int i; 516 int i;
527 517
528 if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 518 if (info->control.hw_key)
529 /* hwaccel */
530 info->control.hw_key = &tx->key->conf;
531 return 0; 519 return 0;
532 }
533 520
534 if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) 521 if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
535 return TX_DROP; 522 return TX_DROP;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 634d14affc8..18d77b5c351 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -83,6 +83,19 @@ config NF_CONNTRACK_SECMARK
83 83
84 If unsure, say 'N'. 84 If unsure, say 'N'.
85 85
86config NF_CONNTRACK_ZONES
87 bool 'Connection tracking zones'
88 depends on NETFILTER_ADVANCED
89 depends on NETFILTER_XT_TARGET_CT
90 help
91 This option enables support for connection tracking zones.
92 Normally, each connection needs to have a unique system wide
93 identity. Connection tracking zones allow to have multiple
94 connections using the same identity, as long as they are
95 contained in different zones.
96
97 If unsure, say `N'.
98
86config NF_CONNTRACK_EVENTS 99config NF_CONNTRACK_EVENTS
87 bool "Connection tracking events" 100 bool "Connection tracking events"
88 depends on NETFILTER_ADVANCED 101 depends on NETFILTER_ADVANCED
@@ -341,6 +354,18 @@ config NETFILTER_XT_TARGET_CONNSECMARK
341 354
342 To compile it as a module, choose M here. If unsure, say N. 355 To compile it as a module, choose M here. If unsure, say N.
343 356
357config NETFILTER_XT_TARGET_CT
358 tristate '"CT" target support'
359 depends on NF_CONNTRACK
360 depends on IP_NF_RAW || IP6_NF_RAW
361 depends on NETFILTER_ADVANCED
362 help
363 This options adds a `CT' target, which allows to specify initial
364 connection tracking parameters like events to be delivered and
365 the helper to be used.
366
367 To compile it as a module, choose M here. If unsure, say N.
368
344config NETFILTER_XT_TARGET_DSCP 369config NETFILTER_XT_TARGET_DSCP
345 tristate '"DSCP" and "TOS" target support' 370 tristate '"DSCP" and "TOS" target support'
346 depends on IP_NF_MANGLE || IP6_NF_MANGLE 371 depends on IP_NF_MANGLE || IP6_NF_MANGLE
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 49f62ee4e9f..f873644f02f 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -44,6 +44,7 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
44obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o 44obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
45obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o 45obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o
46obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o 46obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
47obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
47obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o 48obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
48obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o 49obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
49obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o 50obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index f2d76238b9b..712ccad1334 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -68,6 +68,10 @@ config IP_VS_TAB_BITS
68 each hash entry uses 8 bytes, so you can estimate how much memory is 68 each hash entry uses 8 bytes, so you can estimate how much memory is
69 needed for your box. 69 needed for your box.
70 70
71 You can overwrite this number setting conn_tab_bits module parameter
72 or by appending ip_vs.conn_tab_bits=? to the kernel command line
73 if IP VS was compiled built-in.
74
71comment "IPVS transport protocol load balancing support" 75comment "IPVS transport protocol load balancing support"
72 76
73config IP_VS_PROTO_TCP 77config IP_VS_PROTO_TCP
@@ -100,6 +104,13 @@ config IP_VS_PROTO_AH
100 This option enables support for load balancing AH (Authentication 104 This option enables support for load balancing AH (Authentication
101 Header) transport protocol. Say Y if unsure. 105 Header) transport protocol. Say Y if unsure.
102 106
107config IP_VS_PROTO_SCTP
108 bool "SCTP load balancing support"
109 select LIBCRC32C
110 ---help---
111 This option enables support for load balancing SCTP transport
112 protocol. Say Y if unsure.
113
103comment "IPVS scheduler" 114comment "IPVS scheduler"
104 115
105config IP_VS_RR 116config IP_VS_RR
diff --git a/net/netfilter/ipvs/Makefile b/net/netfilter/ipvs/Makefile
index 73a46fe1fe4..e3baefd7066 100644
--- a/net/netfilter/ipvs/Makefile
+++ b/net/netfilter/ipvs/Makefile
@@ -7,6 +7,7 @@ ip_vs_proto-objs-y :=
7ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_TCP) += ip_vs_proto_tcp.o 7ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_TCP) += ip_vs_proto_tcp.o
8ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_UDP) += ip_vs_proto_udp.o 8ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_UDP) += ip_vs_proto_udp.o
9ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH_ESP) += ip_vs_proto_ah_esp.o 9ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH_ESP) += ip_vs_proto_ah_esp.o
10ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_SCTP) += ip_vs_proto_sctp.o
10 11
11ip_vs-objs := ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \ 12ip_vs-objs := ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \
12 ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \ 13 ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 27c30cf933d..60bb41a8d8d 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -40,6 +40,21 @@
40#include <net/ip_vs.h> 40#include <net/ip_vs.h>
41 41
42 42
43#ifndef CONFIG_IP_VS_TAB_BITS
44#define CONFIG_IP_VS_TAB_BITS 12
45#endif
46
47/*
48 * Connection hash size. Default is what was selected at compile time.
49*/
50int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
51module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444);
52MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size");
53
54/* size and mask values */
55int ip_vs_conn_tab_size;
56int ip_vs_conn_tab_mask;
57
43/* 58/*
44 * Connection hash table: for input and output packets lookups of IPVS 59 * Connection hash table: for input and output packets lookups of IPVS
45 */ 60 */
@@ -125,11 +140,11 @@ static unsigned int ip_vs_conn_hashkey(int af, unsigned proto,
125 if (af == AF_INET6) 140 if (af == AF_INET6)
126 return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd), 141 return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
127 (__force u32)port, proto, ip_vs_conn_rnd) 142 (__force u32)port, proto, ip_vs_conn_rnd)
128 & IP_VS_CONN_TAB_MASK; 143 & ip_vs_conn_tab_mask;
129#endif 144#endif
130 return jhash_3words((__force u32)addr->ip, (__force u32)port, proto, 145 return jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
131 ip_vs_conn_rnd) 146 ip_vs_conn_rnd)
132 & IP_VS_CONN_TAB_MASK; 147 & ip_vs_conn_tab_mask;
133} 148}
134 149
135 150
@@ -760,7 +775,7 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
760 int idx; 775 int idx;
761 struct ip_vs_conn *cp; 776 struct ip_vs_conn *cp;
762 777
763 for(idx = 0; idx < IP_VS_CONN_TAB_SIZE; idx++) { 778 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
764 ct_read_lock_bh(idx); 779 ct_read_lock_bh(idx);
765 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { 780 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
766 if (pos-- == 0) { 781 if (pos-- == 0) {
@@ -797,7 +812,7 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
797 idx = l - ip_vs_conn_tab; 812 idx = l - ip_vs_conn_tab;
798 ct_read_unlock_bh(idx); 813 ct_read_unlock_bh(idx);
799 814
800 while (++idx < IP_VS_CONN_TAB_SIZE) { 815 while (++idx < ip_vs_conn_tab_size) {
801 ct_read_lock_bh(idx); 816 ct_read_lock_bh(idx);
802 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { 817 list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
803 seq->private = &ip_vs_conn_tab[idx]; 818 seq->private = &ip_vs_conn_tab[idx];
@@ -976,8 +991,8 @@ void ip_vs_random_dropentry(void)
976 /* 991 /*
977 * Randomly scan 1/32 of the whole table every second 992 * Randomly scan 1/32 of the whole table every second
978 */ 993 */
979 for (idx = 0; idx < (IP_VS_CONN_TAB_SIZE>>5); idx++) { 994 for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
980 unsigned hash = net_random() & IP_VS_CONN_TAB_MASK; 995 unsigned hash = net_random() & ip_vs_conn_tab_mask;
981 996
982 /* 997 /*
983 * Lock is actually needed in this loop. 998 * Lock is actually needed in this loop.
@@ -1029,7 +1044,7 @@ static void ip_vs_conn_flush(void)
1029 struct ip_vs_conn *cp; 1044 struct ip_vs_conn *cp;
1030 1045
1031 flush_again: 1046 flush_again:
1032 for (idx=0; idx<IP_VS_CONN_TAB_SIZE; idx++) { 1047 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
1033 /* 1048 /*
1034 * Lock is actually needed in this loop. 1049 * Lock is actually needed in this loop.
1035 */ 1050 */
@@ -1060,10 +1075,15 @@ int __init ip_vs_conn_init(void)
1060{ 1075{
1061 int idx; 1076 int idx;
1062 1077
1078 /* Compute size and mask */
1079 ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
1080 ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
1081
1063 /* 1082 /*
1064 * Allocate the connection hash table and initialize its list heads 1083 * Allocate the connection hash table and initialize its list heads
1065 */ 1084 */
1066 ip_vs_conn_tab = vmalloc(IP_VS_CONN_TAB_SIZE*sizeof(struct list_head)); 1085 ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size *
1086 sizeof(struct list_head));
1067 if (!ip_vs_conn_tab) 1087 if (!ip_vs_conn_tab)
1068 return -ENOMEM; 1088 return -ENOMEM;
1069 1089
@@ -1078,12 +1098,12 @@ int __init ip_vs_conn_init(void)
1078 1098
1079 pr_info("Connection hash table configured " 1099 pr_info("Connection hash table configured "
1080 "(size=%d, memory=%ldKbytes)\n", 1100 "(size=%d, memory=%ldKbytes)\n",
1081 IP_VS_CONN_TAB_SIZE, 1101 ip_vs_conn_tab_size,
1082 (long)(IP_VS_CONN_TAB_SIZE*sizeof(struct list_head))/1024); 1102 (long)(ip_vs_conn_tab_size*sizeof(struct list_head))/1024);
1083 IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n", 1103 IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n",
1084 sizeof(struct ip_vs_conn)); 1104 sizeof(struct ip_vs_conn));
1085 1105
1086 for (idx = 0; idx < IP_VS_CONN_TAB_SIZE; idx++) { 1106 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
1087 INIT_LIST_HEAD(&ip_vs_conn_tab[idx]); 1107 INIT_LIST_HEAD(&ip_vs_conn_tab[idx]);
1088 } 1108 }
1089 1109
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 847ffca4018..44590887a92 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -31,6 +31,7 @@
31#include <linux/kernel.h> 31#include <linux/kernel.h>
32#include <linux/ip.h> 32#include <linux/ip.h>
33#include <linux/tcp.h> 33#include <linux/tcp.h>
34#include <linux/sctp.h>
34#include <linux/icmp.h> 35#include <linux/icmp.h>
35 36
36#include <net/ip.h> 37#include <net/ip.h>
@@ -81,6 +82,8 @@ const char *ip_vs_proto_name(unsigned proto)
81 return "UDP"; 82 return "UDP";
82 case IPPROTO_TCP: 83 case IPPROTO_TCP:
83 return "TCP"; 84 return "TCP";
85 case IPPROTO_SCTP:
86 return "SCTP";
84 case IPPROTO_ICMP: 87 case IPPROTO_ICMP:
85 return "ICMP"; 88 return "ICMP";
86#ifdef CONFIG_IP_VS_IPV6 89#ifdef CONFIG_IP_VS_IPV6
@@ -512,8 +515,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
512 */ 515 */
513#ifdef CONFIG_IP_VS_IPV6 516#ifdef CONFIG_IP_VS_IPV6
514 if (svc->af == AF_INET6) 517 if (svc->af == AF_INET6)
515 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, 518 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
516 skb->dev);
517 else 519 else
518#endif 520#endif
519 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); 521 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
@@ -589,8 +591,9 @@ void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
589 ip_send_check(ciph); 591 ip_send_check(ciph);
590 } 592 }
591 593
592 /* the TCP/UDP port */ 594 /* the TCP/UDP/SCTP port */
593 if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol) { 595 if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol ||
596 IPPROTO_SCTP == ciph->protocol) {
594 __be16 *ports = (void *)ciph + ciph->ihl*4; 597 __be16 *ports = (void *)ciph + ciph->ihl*4;
595 598
596 if (inout) 599 if (inout)
@@ -630,8 +633,9 @@ void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
630 ciph->saddr = cp->daddr.in6; 633 ciph->saddr = cp->daddr.in6;
631 } 634 }
632 635
633 /* the TCP/UDP port */ 636 /* the TCP/UDP/SCTP port */
634 if (IPPROTO_TCP == ciph->nexthdr || IPPROTO_UDP == ciph->nexthdr) { 637 if (IPPROTO_TCP == ciph->nexthdr || IPPROTO_UDP == ciph->nexthdr ||
638 IPPROTO_SCTP == ciph->nexthdr) {
635 __be16 *ports = (void *)ciph + sizeof(struct ipv6hdr); 639 __be16 *ports = (void *)ciph + sizeof(struct ipv6hdr);
636 640
637 if (inout) 641 if (inout)
@@ -679,7 +683,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
679 goto out; 683 goto out;
680 } 684 }
681 685
682 if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol) 686 if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
687 IPPROTO_SCTP == protocol)
683 offset += 2 * sizeof(__u16); 688 offset += 2 * sizeof(__u16);
684 if (!skb_make_writable(skb, offset)) 689 if (!skb_make_writable(skb, offset))
685 goto out; 690 goto out;
@@ -857,6 +862,21 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related)
857} 862}
858#endif 863#endif
859 864
865/*
866 * Check if sctp chunc is ABORT chunk
867 */
868static inline int is_sctp_abort(const struct sk_buff *skb, int nh_len)
869{
870 sctp_chunkhdr_t *sch, schunk;
871 sch = skb_header_pointer(skb, nh_len + sizeof(sctp_sctphdr_t),
872 sizeof(schunk), &schunk);
873 if (sch == NULL)
874 return 0;
875 if (sch->type == SCTP_CID_ABORT)
876 return 1;
877 return 0;
878}
879
860static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len) 880static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
861{ 881{
862 struct tcphdr _tcph, *th; 882 struct tcphdr _tcph, *th;
@@ -999,7 +1019,8 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb,
999 if (unlikely(!cp)) { 1019 if (unlikely(!cp)) {
1000 if (sysctl_ip_vs_nat_icmp_send && 1020 if (sysctl_ip_vs_nat_icmp_send &&
1001 (pp->protocol == IPPROTO_TCP || 1021 (pp->protocol == IPPROTO_TCP ||
1002 pp->protocol == IPPROTO_UDP)) { 1022 pp->protocol == IPPROTO_UDP ||
1023 pp->protocol == IPPROTO_SCTP)) {
1003 __be16 _ports[2], *pptr; 1024 __be16 _ports[2], *pptr;
1004 1025
1005 pptr = skb_header_pointer(skb, iph.len, 1026 pptr = skb_header_pointer(skb, iph.len,
@@ -1014,14 +1035,19 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb,
1014 * existing entry if it is not RST 1035 * existing entry if it is not RST
1015 * packet or not TCP packet. 1036 * packet or not TCP packet.
1016 */ 1037 */
1017 if (iph.protocol != IPPROTO_TCP 1038 if ((iph.protocol != IPPROTO_TCP &&
1018 || !is_tcp_reset(skb, iph.len)) { 1039 iph.protocol != IPPROTO_SCTP)
1040 || ((iph.protocol == IPPROTO_TCP
1041 && !is_tcp_reset(skb, iph.len))
1042 || (iph.protocol == IPPROTO_SCTP
1043 && !is_sctp_abort(skb,
1044 iph.len)))) {
1019#ifdef CONFIG_IP_VS_IPV6 1045#ifdef CONFIG_IP_VS_IPV6
1020 if (af == AF_INET6) 1046 if (af == AF_INET6)
1021 icmpv6_send(skb, 1047 icmpv6_send(skb,
1022 ICMPV6_DEST_UNREACH, 1048 ICMPV6_DEST_UNREACH,
1023 ICMPV6_PORT_UNREACH, 1049 ICMPV6_PORT_UNREACH,
1024 0, skb->dev); 1050 0);
1025 else 1051 else
1026#endif 1052#endif
1027 icmp_send(skb, 1053 icmp_send(skb,
@@ -1235,7 +1261,8 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
1235 1261
1236 /* do the statistics and put it back */ 1262 /* do the statistics and put it back */
1237 ip_vs_in_stats(cp, skb); 1263 ip_vs_in_stats(cp, skb);
1238 if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr) 1264 if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr ||
1265 IPPROTO_SCTP == cih->nexthdr)
1239 offset += 2 * sizeof(__u16); 1266 offset += 2 * sizeof(__u16);
1240 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset); 1267 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset);
1241 /* do not touch skb anymore */ 1268 /* do not touch skb anymore */
@@ -1358,6 +1385,21 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
1358 * encorage the standby servers to update the connections timeout 1385 * encorage the standby servers to update the connections timeout
1359 */ 1386 */
1360 pkts = atomic_add_return(1, &cp->in_pkts); 1387 pkts = atomic_add_return(1, &cp->in_pkts);
1388 if (af == AF_INET && (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
1389 cp->protocol == IPPROTO_SCTP) {
1390 if ((cp->state == IP_VS_SCTP_S_ESTABLISHED &&
1391 (atomic_read(&cp->in_pkts) %
1392 sysctl_ip_vs_sync_threshold[1]
1393 == sysctl_ip_vs_sync_threshold[0])) ||
1394 (cp->old_state != cp->state &&
1395 ((cp->state == IP_VS_SCTP_S_CLOSED) ||
1396 (cp->state == IP_VS_SCTP_S_SHUT_ACK_CLI) ||
1397 (cp->state == IP_VS_SCTP_S_SHUT_ACK_SER)))) {
1398 ip_vs_sync_conn(cp);
1399 goto out;
1400 }
1401 }
1402
1361 if (af == AF_INET && 1403 if (af == AF_INET &&
1362 (ip_vs_sync_state & IP_VS_STATE_MASTER) && 1404 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
1363 (((cp->protocol != IPPROTO_TCP || 1405 (((cp->protocol != IPPROTO_TCP ||
@@ -1370,6 +1412,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
1370 (cp->state == IP_VS_TCP_S_CLOSE_WAIT) || 1412 (cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
1371 (cp->state == IP_VS_TCP_S_TIME_WAIT))))) 1413 (cp->state == IP_VS_TCP_S_TIME_WAIT)))))
1372 ip_vs_sync_conn(cp); 1414 ip_vs_sync_conn(cp);
1415out:
1373 cp->old_state = cp->state; 1416 cp->old_state = cp->state;
1374 1417
1375 ip_vs_conn_put(cp); 1418 ip_vs_conn_put(cp);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c37ac2d7bec..7ee9c3426f4 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1843,7 +1843,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
1843 if (v == SEQ_START_TOKEN) { 1843 if (v == SEQ_START_TOKEN) {
1844 seq_printf(seq, 1844 seq_printf(seq,
1845 "IP Virtual Server version %d.%d.%d (size=%d)\n", 1845 "IP Virtual Server version %d.%d.%d (size=%d)\n",
1846 NVERSION(IP_VS_VERSION_CODE), IP_VS_CONN_TAB_SIZE); 1846 NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size);
1847 seq_puts(seq, 1847 seq_puts(seq,
1848 "Prot LocalAddress:Port Scheduler Flags\n"); 1848 "Prot LocalAddress:Port Scheduler Flags\n");
1849 seq_puts(seq, 1849 seq_puts(seq,
@@ -2132,8 +2132,9 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2132 } 2132 }
2133 } 2133 }
2134 2134
2135 /* Check for valid protocol: TCP or UDP, even for fwmark!=0 */ 2135 /* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */
2136 if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP) { 2136 if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP &&
2137 usvc.protocol != IPPROTO_SCTP) {
2137 pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n", 2138 pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n",
2138 usvc.protocol, &usvc.addr.ip, 2139 usvc.protocol, &usvc.addr.ip,
2139 ntohs(usvc.port), usvc.sched_name); 2140 ntohs(usvc.port), usvc.sched_name);
@@ -2386,7 +2387,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2386 char buf[64]; 2387 char buf[64];
2387 2388
2388 sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)", 2389 sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)",
2389 NVERSION(IP_VS_VERSION_CODE), IP_VS_CONN_TAB_SIZE); 2390 NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size);
2390 if (copy_to_user(user, buf, strlen(buf)+1) != 0) { 2391 if (copy_to_user(user, buf, strlen(buf)+1) != 0) {
2391 ret = -EFAULT; 2392 ret = -EFAULT;
2392 goto out; 2393 goto out;
@@ -2399,7 +2400,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2399 { 2400 {
2400 struct ip_vs_getinfo info; 2401 struct ip_vs_getinfo info;
2401 info.version = IP_VS_VERSION_CODE; 2402 info.version = IP_VS_VERSION_CODE;
2402 info.size = IP_VS_CONN_TAB_SIZE; 2403 info.size = ip_vs_conn_tab_size;
2403 info.num_services = ip_vs_num_services; 2404 info.num_services = ip_vs_num_services;
2404 if (copy_to_user(user, &info, sizeof(info)) != 0) 2405 if (copy_to_user(user, &info, sizeof(info)) != 0)
2405 ret = -EFAULT; 2406 ret = -EFAULT;
@@ -3243,7 +3244,7 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
3243 case IPVS_CMD_GET_INFO: 3244 case IPVS_CMD_GET_INFO:
3244 NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE); 3245 NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE);
3245 NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE, 3246 NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
3246 IP_VS_CONN_TAB_SIZE); 3247 ip_vs_conn_tab_size);
3247 break; 3248 break;
3248 } 3249 }
3249 3250
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 33e2c799cba..73f38ea98f2 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -208,7 +208,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
208 */ 208 */
209 from.ip = n_cp->vaddr.ip; 209 from.ip = n_cp->vaddr.ip;
210 port = n_cp->vport; 210 port = n_cp->vport;
211 sprintf(buf, "%d,%d,%d,%d,%d,%d", NIPQUAD(from.ip), 211 sprintf(buf, "%u,%u,%u,%u,%u,%u", NIPQUAD(from.ip),
212 (ntohs(port)>>8)&255, ntohs(port)&255); 212 (ntohs(port)>>8)&255, ntohs(port)&255);
213 buf_len = strlen(buf); 213 buf_len = strlen(buf);
214 214
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index 3e767167454..0e584553819 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -257,6 +257,9 @@ int __init ip_vs_protocol_init(void)
257#ifdef CONFIG_IP_VS_PROTO_UDP 257#ifdef CONFIG_IP_VS_PROTO_UDP
258 REGISTER_PROTOCOL(&ip_vs_protocol_udp); 258 REGISTER_PROTOCOL(&ip_vs_protocol_udp);
259#endif 259#endif
260#ifdef CONFIG_IP_VS_PROTO_SCTP
261 REGISTER_PROTOCOL(&ip_vs_protocol_sctp);
262#endif
260#ifdef CONFIG_IP_VS_PROTO_AH 263#ifdef CONFIG_IP_VS_PROTO_AH
261 REGISTER_PROTOCOL(&ip_vs_protocol_ah); 264 REGISTER_PROTOCOL(&ip_vs_protocol_ah);
262#endif 265#endif
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
new file mode 100644
index 00000000000..c9a3f7a21d5
--- /dev/null
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -0,0 +1,1183 @@
1#include <linux/kernel.h>
2#include <linux/ip.h>
3#include <linux/sctp.h>
4#include <net/ip.h>
5#include <net/ip6_checksum.h>
6#include <linux/netfilter.h>
7#include <linux/netfilter_ipv4.h>
8#include <net/sctp/checksum.h>
9#include <net/ip_vs.h>
10
11
12static struct ip_vs_conn *
13sctp_conn_in_get(int af,
14 const struct sk_buff *skb,
15 struct ip_vs_protocol *pp,
16 const struct ip_vs_iphdr *iph,
17 unsigned int proto_off,
18 int inverse)
19{
20 __be16 _ports[2], *pptr;
21
22 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
23 if (pptr == NULL)
24 return NULL;
25
26 if (likely(!inverse))
27 return ip_vs_conn_in_get(af, iph->protocol,
28 &iph->saddr, pptr[0],
29 &iph->daddr, pptr[1]);
30 else
31 return ip_vs_conn_in_get(af, iph->protocol,
32 &iph->daddr, pptr[1],
33 &iph->saddr, pptr[0]);
34}
35
36static struct ip_vs_conn *
37sctp_conn_out_get(int af,
38 const struct sk_buff *skb,
39 struct ip_vs_protocol *pp,
40 const struct ip_vs_iphdr *iph,
41 unsigned int proto_off,
42 int inverse)
43{
44 __be16 _ports[2], *pptr;
45
46 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
47 if (pptr == NULL)
48 return NULL;
49
50 if (likely(!inverse))
51 return ip_vs_conn_out_get(af, iph->protocol,
52 &iph->saddr, pptr[0],
53 &iph->daddr, pptr[1]);
54 else
55 return ip_vs_conn_out_get(af, iph->protocol,
56 &iph->daddr, pptr[1],
57 &iph->saddr, pptr[0]);
58}
59
60static int
61sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
62 int *verdict, struct ip_vs_conn **cpp)
63{
64 struct ip_vs_service *svc;
65 sctp_chunkhdr_t _schunkh, *sch;
66 sctp_sctphdr_t *sh, _sctph;
67 struct ip_vs_iphdr iph;
68
69 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
70
71 sh = skb_header_pointer(skb, iph.len, sizeof(_sctph), &_sctph);
72 if (sh == NULL)
73 return 0;
74
75 sch = skb_header_pointer(skb, iph.len + sizeof(sctp_sctphdr_t),
76 sizeof(_schunkh), &_schunkh);
77 if (sch == NULL)
78 return 0;
79
80 if ((sch->type == SCTP_CID_INIT) &&
81 (svc = ip_vs_service_get(af, skb->mark, iph.protocol,
82 &iph.daddr, sh->dest))) {
83 if (ip_vs_todrop()) {
84 /*
85 * It seems that we are very loaded.
86 * We have to drop this packet :(
87 */
88 ip_vs_service_put(svc);
89 *verdict = NF_DROP;
90 return 0;
91 }
92 /*
93 * Let the virtual server select a real server for the
94 * incoming connection, and create a connection entry.
95 */
96 *cpp = ip_vs_schedule(svc, skb);
97 if (!*cpp) {
98 *verdict = ip_vs_leave(svc, skb, pp);
99 return 0;
100 }
101 ip_vs_service_put(svc);
102 }
103
104 return 1;
105}
106
107static int
108sctp_snat_handler(struct sk_buff *skb,
109 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
110{
111 sctp_sctphdr_t *sctph;
112 unsigned int sctphoff;
113 __be32 crc32;
114
115#ifdef CONFIG_IP_VS_IPV6
116 if (cp->af == AF_INET6)
117 sctphoff = sizeof(struct ipv6hdr);
118 else
119#endif
120 sctphoff = ip_hdrlen(skb);
121
122 /* csum_check requires unshared skb */
123 if (!skb_make_writable(skb, sctphoff + sizeof(*sctph)))
124 return 0;
125
126 if (unlikely(cp->app != NULL)) {
127 /* Some checks before mangling */
128 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
129 return 0;
130
131 /* Call application helper if needed */
132 if (!ip_vs_app_pkt_out(cp, skb))
133 return 0;
134 }
135
136 sctph = (void *) skb_network_header(skb) + sctphoff;
137 sctph->source = cp->vport;
138
139 /* Calculate the checksum */
140 crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
141 for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next)
142 crc32 = sctp_update_cksum((u8 *) skb->data, skb_headlen(skb),
143 crc32);
144 crc32 = sctp_end_cksum(crc32);
145 sctph->checksum = crc32;
146
147 return 1;
148}
149
150static int
151sctp_dnat_handler(struct sk_buff *skb,
152 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
153{
154
155 sctp_sctphdr_t *sctph;
156 unsigned int sctphoff;
157 __be32 crc32;
158
159#ifdef CONFIG_IP_VS_IPV6
160 if (cp->af == AF_INET6)
161 sctphoff = sizeof(struct ipv6hdr);
162 else
163#endif
164 sctphoff = ip_hdrlen(skb);
165
166 /* csum_check requires unshared skb */
167 if (!skb_make_writable(skb, sctphoff + sizeof(*sctph)))
168 return 0;
169
170 if (unlikely(cp->app != NULL)) {
171 /* Some checks before mangling */
172 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
173 return 0;
174
175 /* Call application helper if needed */
176 if (!ip_vs_app_pkt_out(cp, skb))
177 return 0;
178 }
179
180 sctph = (void *) skb_network_header(skb) + sctphoff;
181 sctph->dest = cp->dport;
182
183 /* Calculate the checksum */
184 crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
185 for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next)
186 crc32 = sctp_update_cksum((u8 *) skb->data, skb_headlen(skb),
187 crc32);
188 crc32 = sctp_end_cksum(crc32);
189 sctph->checksum = crc32;
190
191 return 1;
192}
193
194static int
195sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
196{
197 struct sk_buff *list = skb_shinfo(skb)->frag_list;
198 unsigned int sctphoff;
199 struct sctphdr *sh, _sctph;
200 __le32 cmp;
201 __le32 val;
202 __u32 tmp;
203
204#ifdef CONFIG_IP_VS_IPV6
205 if (af == AF_INET6)
206 sctphoff = sizeof(struct ipv6hdr);
207 else
208#endif
209 sctphoff = ip_hdrlen(skb);
210
211 sh = skb_header_pointer(skb, sctphoff, sizeof(_sctph), &_sctph);
212 if (sh == NULL)
213 return 0;
214
215 cmp = sh->checksum;
216
217 tmp = sctp_start_cksum((__u8 *) sh, skb_headlen(skb));
218 for (; list; list = list->next)
219 tmp = sctp_update_cksum((__u8 *) list->data,
220 skb_headlen(list), tmp);
221
222 val = sctp_end_cksum(tmp);
223
224 if (val != cmp) {
225 /* CRC failure, dump it. */
226 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
227 "Failed checksum for");
228 return 0;
229 }
230 return 1;
231}
232
233struct ipvs_sctp_nextstate {
234 int next_state;
235};
236enum ipvs_sctp_event_t {
237 IP_VS_SCTP_EVE_DATA_CLI,
238 IP_VS_SCTP_EVE_DATA_SER,
239 IP_VS_SCTP_EVE_INIT_CLI,
240 IP_VS_SCTP_EVE_INIT_SER,
241 IP_VS_SCTP_EVE_INIT_ACK_CLI,
242 IP_VS_SCTP_EVE_INIT_ACK_SER,
243 IP_VS_SCTP_EVE_COOKIE_ECHO_CLI,
244 IP_VS_SCTP_EVE_COOKIE_ECHO_SER,
245 IP_VS_SCTP_EVE_COOKIE_ACK_CLI,
246 IP_VS_SCTP_EVE_COOKIE_ACK_SER,
247 IP_VS_SCTP_EVE_ABORT_CLI,
248 IP_VS_SCTP_EVE__ABORT_SER,
249 IP_VS_SCTP_EVE_SHUT_CLI,
250 IP_VS_SCTP_EVE_SHUT_SER,
251 IP_VS_SCTP_EVE_SHUT_ACK_CLI,
252 IP_VS_SCTP_EVE_SHUT_ACK_SER,
253 IP_VS_SCTP_EVE_SHUT_COM_CLI,
254 IP_VS_SCTP_EVE_SHUT_COM_SER,
255 IP_VS_SCTP_EVE_LAST
256};
257
258static enum ipvs_sctp_event_t sctp_events[255] = {
259 IP_VS_SCTP_EVE_DATA_CLI,
260 IP_VS_SCTP_EVE_INIT_CLI,
261 IP_VS_SCTP_EVE_INIT_ACK_CLI,
262 IP_VS_SCTP_EVE_DATA_CLI,
263 IP_VS_SCTP_EVE_DATA_CLI,
264 IP_VS_SCTP_EVE_DATA_CLI,
265 IP_VS_SCTP_EVE_ABORT_CLI,
266 IP_VS_SCTP_EVE_SHUT_CLI,
267 IP_VS_SCTP_EVE_SHUT_ACK_CLI,
268 IP_VS_SCTP_EVE_DATA_CLI,
269 IP_VS_SCTP_EVE_COOKIE_ECHO_CLI,
270 IP_VS_SCTP_EVE_COOKIE_ACK_CLI,
271 IP_VS_SCTP_EVE_DATA_CLI,
272 IP_VS_SCTP_EVE_DATA_CLI,
273 IP_VS_SCTP_EVE_SHUT_COM_CLI,
274};
275
276static struct ipvs_sctp_nextstate
277 sctp_states_table[IP_VS_SCTP_S_LAST][IP_VS_SCTP_EVE_LAST] = {
278 /*
279 * STATE : IP_VS_SCTP_S_NONE
280 */
281 /*next state *//*event */
282 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
283 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
284 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
285 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
286 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
287 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
288 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
289 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
290 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
291 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
292 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
293 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
294 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
295 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
296 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
297 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
298 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
299 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ },
300 },
301 /*
302 * STATE : IP_VS_SCTP_S_INIT_CLI
303 * Cient sent INIT and is waiting for reply from server(In ECHO_WAIT)
304 */
305 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
306 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
307 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
308 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
309 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
310 {IP_VS_SCTP_S_INIT_ACK_SER /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
311 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ECHO_CLI */ },
312 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_ECHO_SER */ },
313 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
314 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
315 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
316 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
317 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
318 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
319 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
320 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
321 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
322 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
323 },
324 /*
325 * State : IP_VS_SCTP_S_INIT_SER
326 * Server sent INIT and waiting for INIT ACK from the client
327 */
328 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
329 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
330 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
331 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
332 {IP_VS_SCTP_S_INIT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
333 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
334 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
335 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
336 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
337 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
338 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
339 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
340 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
341 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
342 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
343 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
344 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
345 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
346 },
347 /*
348 * State : IP_VS_SCTP_S_INIT_ACK_CLI
349 * Client sent INIT ACK and waiting for ECHO from the server
350 */
351 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
352 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
353 /*
354 * We have got an INIT from client. From the spec.“Upon receipt of
355 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
356 * an INIT ACK using the same parameters it sent in its original
357 * INIT chunk (including its Initiate Tag, unchanged”).
358 */
359 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
360 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
361 /*
362 * INIT_ACK has been resent by the client, let us stay is in
363 * the same state
364 */
365 {IP_VS_SCTP_S_INIT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
366 /*
367 * INIT_ACK sent by the server, close the connection
368 */
369 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
370 /*
371 * ECHO by client, it should not happen, close the connection
372 */
373 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
374 /*
375 * ECHO by server, this is what we are expecting, move to ECHO_SER
376 */
377 {IP_VS_SCTP_S_ECHO_SER /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
378 /*
379 * COOKIE ACK from client, it should not happen, close the connection
380 */
381 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
382 /*
383 * Unexpected COOKIE ACK from server, staty in the same state
384 */
385 {IP_VS_SCTP_S_INIT_ACK_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
386 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
387 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
388 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
389 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
390 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
391 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
392 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
393 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
394 },
395 /*
396 * State : IP_VS_SCTP_S_INIT_ACK_SER
397 * Server sent INIT ACK and waiting for ECHO from the client
398 */
399 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
400 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
401 /*
402 * We have got an INIT from client. From the spec.“Upon receipt of
403 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
404 * an INIT ACK using the same parameters it sent in its original
405 * INIT chunk (including its Initiate Tag, unchanged”).
406 */
407 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
408 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
409 /*
410 * Unexpected INIT_ACK by the client, let us close the connection
411 */
412 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
413 /*
414 * INIT_ACK resent by the server, let us move to same state
415 */
416 {IP_VS_SCTP_S_INIT_ACK_SER /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
417 /*
418 * Client send the ECHO, this is what we are expecting,
419 * move to ECHO_CLI
420 */
421 {IP_VS_SCTP_S_ECHO_CLI /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
422 /*
423 * ECHO received from the server, Not sure what to do,
424 * let us close it
425 */
426 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
427 /*
428 * COOKIE ACK from client, let us stay in the same state
429 */
430 {IP_VS_SCTP_S_INIT_ACK_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
431 /*
432 * COOKIE ACK from server, hmm... this should not happen, lets close
433 * the connection.
434 */
435 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
436 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
437 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
438 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
439 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
440 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
441 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
442 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
443 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
444 },
445 /*
446 * State : IP_VS_SCTP_S_ECHO_CLI
447 * Cient sent ECHO and waiting COOKEI ACK from the Server
448 */
449 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
450 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
451 /*
452 * We have got an INIT from client. From the spec.“Upon receipt of
453 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
454 * an INIT ACK using the same parameters it sent in its original
455 * INIT chunk (including its Initiate Tag, unchanged”).
456 */
457 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
458 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
459 /*
460 * INIT_ACK has been by the client, let us close the connection
461 */
462 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
463 /*
464 * INIT_ACK sent by the server, Unexpected INIT ACK, spec says,
465 * “If an INIT ACK is received by an endpoint in any state other
466 * than the COOKIE-WAIT state, the endpoint should discard the
467 * INIT ACK chunk”. Stay in the same state
468 */
469 {IP_VS_SCTP_S_ECHO_CLI /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
470 /*
471 * Client resent the ECHO, let us stay in the same state
472 */
473 {IP_VS_SCTP_S_ECHO_CLI /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
474 /*
475 * ECHO received from the server, Not sure what to do,
476 * let us close it
477 */
478 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
479 /*
480 * COOKIE ACK from client, this shoud not happen, let's close the
481 * connection
482 */
483 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
484 /*
485 * COOKIE ACK from server, this is what we are awaiting,lets move to
486 * ESTABLISHED.
487 */
488 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
489 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
490 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
491 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
492 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
493 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
494 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
495 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
496 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
497 },
498 /*
499 * State : IP_VS_SCTP_S_ECHO_SER
500 * Server sent ECHO and waiting COOKEI ACK from the client
501 */
502 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
503 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
504 /*
505 * We have got an INIT from client. From the spec.“Upon receipt of
506 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
507 * an INIT ACK using the same parameters it sent in its original
508 * INIT chunk (including its Initiate Tag, unchanged”).
509 */
510 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
511 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
512 /*
513 * INIT_ACK sent by the server, Unexpected INIT ACK, spec says,
514 * “If an INIT ACK is received by an endpoint in any state other
515 * than the COOKIE-WAIT state, the endpoint should discard the
516 * INIT ACK chunk”. Stay in the same state
517 */
518 {IP_VS_SCTP_S_ECHO_SER /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
519 /*
520 * INIT_ACK has been by the server, let us close the connection
521 */
522 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
523 /*
524 * Client sent the ECHO, not sure what to do, let's close the
525 * connection.
526 */
527 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
528 /*
529 * ECHO resent by the server, stay in the same state
530 */
531 {IP_VS_SCTP_S_ECHO_SER /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
532 /*
533 * COOKIE ACK from client, this is what we are expecting, let's move
534 * to ESTABLISHED.
535 */
536 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
537 /*
538 * COOKIE ACK from server, this should not happen, lets close the
539 * connection.
540 */
541 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
542 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
543 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
544 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
545 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
546 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
547 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
548 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
549 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
550 },
551 /*
552 * State : IP_VS_SCTP_S_ESTABLISHED
553 * Association established
554 */
555 {{IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_DATA_CLI */ },
556 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_DATA_SER */ },
557 /*
558 * We have got an INIT from client. From the spec.“Upon receipt of
559 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
560 * an INIT ACK using the same parameters it sent in its original
561 * INIT chunk (including its Initiate Tag, unchanged”).
562 */
563 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
564 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
565 /*
566 * INIT_ACK sent by the server, Unexpected INIT ACK, spec says,
567 * “If an INIT ACK is received by an endpoint in any state other
568 * than the COOKIE-WAIT state, the endpoint should discard the
569 * INIT ACK chunk”. Stay in the same state
570 */
571 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
572 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
573 /*
574 * Client sent ECHO, Spec(sec 5.2.4) says it may be handled by the
575 * peer and peer shall move to the ESTABISHED. if it doesn't handle
576 * it will send ERROR chunk. So, stay in the same state
577 */
578 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
579 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
580 /*
581 * COOKIE ACK from client, not sure what to do stay in the same state
582 */
583 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
584 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
585 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
586 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
587 /*
588 * SHUTDOWN from the client, move to SHUDDOWN_CLI
589 */
590 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ },
591 /*
592 * SHUTDOWN from the server, move to SHUTDOWN_SER
593 */
594 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ },
595 /*
596 * client sent SHUDTDOWN_ACK, this should not happen, let's close
597 * the connection
598 */
599 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
600 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
601 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
602 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
603 },
604 /*
605 * State : IP_VS_SCTP_S_SHUT_CLI
606 * SHUTDOWN sent from the client, waitinf for SHUT ACK from the server
607 */
608 /*
609 * We recieved the data chuck, keep the state unchanged. I assume
610 * that still data chuncks can be received by both the peers in
611 * SHUDOWN state
612 */
613
614 {{IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_DATA_CLI */ },
615 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_DATA_SER */ },
616 /*
617 * We have got an INIT from client. From the spec.“Upon receipt of
618 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
619 * an INIT ACK using the same parameters it sent in its original
620 * INIT chunk (including its Initiate Tag, unchanged”).
621 */
622 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
623 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
624 /*
625 * INIT_ACK sent by the server, Unexpected INIT ACK, spec says,
626 * “If an INIT ACK is received by an endpoint in any state other
627 * than the COOKIE-WAIT state, the endpoint should discard the
628 * INIT ACK chunk”. Stay in the same state
629 */
630 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
631 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
632 /*
633 * Client sent ECHO, Spec(sec 5.2.4) says it may be handled by the
634 * peer and peer shall move to the ESTABISHED. if it doesn't handle
635 * it will send ERROR chunk. So, stay in the same state
636 */
637 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
638 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
639 /*
640 * COOKIE ACK from client, not sure what to do stay in the same state
641 */
642 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
643 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
644 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
645 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
646 /*
647 * SHUTDOWN resent from the client, move to SHUDDOWN_CLI
648 */
649 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ },
650 /*
651 * SHUTDOWN from the server, move to SHUTDOWN_SER
652 */
653 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ },
654 /*
655 * client sent SHUDTDOWN_ACK, this should not happen, let's close
656 * the connection
657 */
658 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
659 /*
660 * Server sent SHUTDOWN ACK, this is what we are expecting, let's move
661 * to SHUDOWN_ACK_SER
662 */
663 {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
664 /*
665 * SHUTDOWN COM from client, this should not happen, let's close the
666 * connection
667 */
668 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
669 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
670 },
671 /*
672 * State : IP_VS_SCTP_S_SHUT_SER
673 * SHUTDOWN sent from the server, waitinf for SHUTDOWN ACK from client
674 */
675 /*
676 * We recieved the data chuck, keep the state unchanged. I assume
677 * that still data chuncks can be received by both the peers in
678 * SHUDOWN state
679 */
680
681 {{IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_DATA_CLI */ },
682 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_DATA_SER */ },
683 /*
684 * We have got an INIT from client. From the spec.“Upon receipt of
685 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
686 * an INIT ACK using the same parameters it sent in its original
687 * INIT chunk (including its Initiate Tag, unchanged”).
688 */
689 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
690 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
691 /*
692 * INIT_ACK sent by the server, Unexpected INIT ACK, spec says,
693 * “If an INIT ACK is received by an endpoint in any state other
694 * than the COOKIE-WAIT state, the endpoint should discard the
695 * INIT ACK chunk”. Stay in the same state
696 */
697 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
698 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
699 /*
700 * Client sent ECHO, Spec(sec 5.2.4) says it may be handled by the
701 * peer and peer shall move to the ESTABISHED. if it doesn't handle
702 * it will send ERROR chunk. So, stay in the same state
703 */
704 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
705 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
706 /*
707 * COOKIE ACK from client, not sure what to do stay in the same state
708 */
709 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
710 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
711 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
712 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
713 /*
714 * SHUTDOWN resent from the client, move to SHUDDOWN_CLI
715 */
716 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ },
717 /*
718 * SHUTDOWN resent from the server, move to SHUTDOWN_SER
719 */
720 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ },
721 /*
722 * client sent SHUDTDOWN_ACK, this is what we are expecting, let's
723 * move to SHUT_ACK_CLI
724 */
725 {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
726 /*
727 * Server sent SHUTDOWN ACK, this should not happen, let's close the
728 * connection
729 */
730 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
731 /*
732 * SHUTDOWN COM from client, this should not happen, let's close the
733 * connection
734 */
735 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
736 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
737 },
738
739 /*
740 * State : IP_VS_SCTP_S_SHUT_ACK_CLI
741 * SHUTDOWN ACK from the client, awaiting for SHUTDOWN COM from server
742 */
743 /*
744 * We recieved the data chuck, keep the state unchanged. I assume
745 * that still data chuncks can be received by both the peers in
746 * SHUDOWN state
747 */
748
749 {{IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_DATA_CLI */ },
750 {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_DATA_SER */ },
751 /*
752 * We have got an INIT from client. From the spec.“Upon receipt of
753 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
754 * an INIT ACK using the same parameters it sent in its original
755 * INIT chunk (including its Initiate Tag, unchanged”).
756 */
757 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
758 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
759 /*
760 * INIT_ACK sent by the server, Unexpected INIT ACK, spec says,
761 * “If an INIT ACK is received by an endpoint in any state other
762 * than the COOKIE-WAIT state, the endpoint should discard the
763 * INIT ACK chunk”. Stay in the same state
764 */
765 {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
766 {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
767 /*
768 * Client sent ECHO, Spec(sec 5.2.4) says it may be handled by the
769 * peer and peer shall move to the ESTABISHED. if it doesn't handle
770 * it will send ERROR chunk. So, stay in the same state
771 */
772 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
773 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
774 /*
775 * COOKIE ACK from client, not sure what to do stay in the same state
776 */
777 {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
778 {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
779 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
780 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
781 /*
782 * SHUTDOWN sent from the client, move to SHUDDOWN_CLI
783 */
784 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ },
785 /*
786 * SHUTDOWN sent from the server, move to SHUTDOWN_SER
787 */
788 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ },
789 /*
790 * client resent SHUDTDOWN_ACK, let's stay in the same state
791 */
792 {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
793 /*
794 * Server sent SHUTDOWN ACK, this should not happen, let's close the
795 * connection
796 */
797 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
798 /*
799 * SHUTDOWN COM from client, this should not happen, let's close the
800 * connection
801 */
802 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
803 /*
804 * SHUTDOWN COMPLETE from server this is what we are expecting.
805 */
806 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
807 },
808
809 /*
810 * State : IP_VS_SCTP_S_SHUT_ACK_SER
811 * SHUTDOWN ACK from the server, awaiting for SHUTDOWN COM from client
812 */
813 /*
814 * We recieved the data chuck, keep the state unchanged. I assume
815 * that still data chuncks can be received by both the peers in
816 * SHUDOWN state
817 */
818
819 {{IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_DATA_CLI */ },
820 {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_DATA_SER */ },
821 /*
822 * We have got an INIT from client. From the spec.“Upon receipt of
823 * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
824 * an INIT ACK using the same parameters it sent in its original
825 * INIT chunk (including its Initiate Tag, unchanged”).
826 */
827 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
828 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
829 /*
830 * INIT_ACK sent by the server, Unexpected INIT ACK, spec says,
831 * “If an INIT ACK is received by an endpoint in any state other
832 * than the COOKIE-WAIT state, the endpoint should discard the
833 * INIT ACK chunk”. Stay in the same state
834 */
835 {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
836 {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
837 /*
838 * Client sent ECHO, Spec(sec 5.2.4) says it may be handled by the
839 * peer and peer shall move to the ESTABISHED. if it doesn't handle
840 * it will send ERROR chunk. So, stay in the same state
841 */
842 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
843 {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
844 /*
845 * COOKIE ACK from client, not sure what to do stay in the same state
846 */
847 {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
848 {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
849 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
850 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
851 /*
852 * SHUTDOWN sent from the client, move to SHUDDOWN_CLI
853 */
854 {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ },
855 /*
856 * SHUTDOWN sent from the server, move to SHUTDOWN_SER
857 */
858 {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ },
859 /*
860 * client sent SHUDTDOWN_ACK, this should not happen let's close
861 * the connection.
862 */
863 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
864 /*
865 * Server resent SHUTDOWN ACK, stay in the same state
866 */
867 {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
868 /*
869 * SHUTDOWN COM from client, this what we are expecting, let's close
870 * the connection
871 */
872 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
873 /*
874 * SHUTDOWN COMPLETE from server this should not happen.
875 */
876 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
877 },
878 /*
879 * State : IP_VS_SCTP_S_CLOSED
880 */
881 {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
882 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
883 {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
884 {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
885 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
886 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
887 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
888 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
889 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
890 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
891 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
892 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
893 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
894 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
895 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
896 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
897 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
898 {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
899 }
900};
901
/*
 * Timeout table[state]: connection timeout (in jiffies) installed in
 * cp->timeout when a connection enters the given SCTP state (see
 * set_sctp_state()).  ESTABLISHED is kept the longest; transient
 * handshake/shutdown states expire after a minute.
 */
static int sctp_timeouts[IP_VS_SCTP_S_LAST + 1] = {
	[IP_VS_SCTP_S_NONE]         =     2 * HZ,
	[IP_VS_SCTP_S_INIT_CLI]     =     1 * 60 * HZ,
	[IP_VS_SCTP_S_INIT_SER]     =     1 * 60 * HZ,
	[IP_VS_SCTP_S_INIT_ACK_CLI] =     1 * 60 * HZ,
	[IP_VS_SCTP_S_INIT_ACK_SER] =     1 * 60 * HZ,
	[IP_VS_SCTP_S_ECHO_CLI]     =     1 * 60 * HZ,
	[IP_VS_SCTP_S_ECHO_SER]     =     1 * 60 * HZ,
	[IP_VS_SCTP_S_ESTABLISHED]  =    15 * 60 * HZ,
	[IP_VS_SCTP_S_SHUT_CLI]     =     1 * 60 * HZ,
	[IP_VS_SCTP_S_SHUT_SER]     =     1 * 60 * HZ,
	[IP_VS_SCTP_S_SHUT_ACK_CLI] =     1 * 60 * HZ,
	[IP_VS_SCTP_S_SHUT_ACK_SER] =     1 * 60 * HZ,
	[IP_VS_SCTP_S_CLOSED]       =    10 * HZ,
	/* Sentinel slot; a state should never actually be IP_VS_SCTP_S_LAST. */
	[IP_VS_SCTP_S_LAST]         =     2 * HZ,
};
921
922static const char *sctp_state_name_table[IP_VS_SCTP_S_LAST + 1] = {
923 [IP_VS_SCTP_S_NONE] = "NONE",
924 [IP_VS_SCTP_S_INIT_CLI] = "INIT_CLI",
925 [IP_VS_SCTP_S_INIT_SER] = "INIT_SER",
926 [IP_VS_SCTP_S_INIT_ACK_CLI] = "INIT_ACK_CLI",
927 [IP_VS_SCTP_S_INIT_ACK_SER] = "INIT_ACK_SER",
928 [IP_VS_SCTP_S_ECHO_CLI] = "COOKIE_ECHO_CLI",
929 [IP_VS_SCTP_S_ECHO_SER] = "COOKIE_ECHO_SER",
930 [IP_VS_SCTP_S_ESTABLISHED] = "ESTABISHED",
931 [IP_VS_SCTP_S_SHUT_CLI] = "SHUTDOWN_CLI",
932 [IP_VS_SCTP_S_SHUT_SER] = "SHUTDOWN_SER",
933 [IP_VS_SCTP_S_SHUT_ACK_CLI] = "SHUTDOWN_ACK_CLI",
934 [IP_VS_SCTP_S_SHUT_ACK_SER] = "SHUTDOWN_ACK_SER",
935 [IP_VS_SCTP_S_CLOSED] = "CLOSED",
936 [IP_VS_SCTP_S_LAST] = "BUG!"
937};
938
939
940static const char *sctp_state_name(int state)
941{
942 if (state >= IP_VS_SCTP_S_LAST)
943 return "ERR!";
944 if (sctp_state_name_table[state])
945 return sctp_state_name_table[state];
946 return "?";
947}
948
/*
 * ip_vs_protocol .timeout_change callback.  SCTP has no per-flag
 * timeout adjustment (unlike TCP's defense modes), so this is a stub.
 */
static void sctp_timeout_change(struct ip_vs_protocol *pp, int flags)
{
}
952
953static int
954sctp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
955{
956
957return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_SCTP_S_LAST,
958 sctp_state_name_table, sname, to);
959}
960
961static inline int
962set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
963 int direction, const struct sk_buff *skb)
964{
965 sctp_chunkhdr_t _sctpch, *sch;
966 unsigned char chunk_type;
967 int event, next_state;
968 int ihl;
969
970#ifdef CONFIG_IP_VS_IPV6
971 ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);
972#else
973 ihl = ip_hdrlen(skb);
974#endif
975
976 sch = skb_header_pointer(skb, ihl + sizeof(sctp_sctphdr_t),
977 sizeof(_sctpch), &_sctpch);
978 if (sch == NULL)
979 return 0;
980
981 chunk_type = sch->type;
982 /*
983 * Section 3: Multiple chunks can be bundled into one SCTP packet
984 * up to the MTU size, except for the INIT, INIT ACK, and
985 * SHUTDOWN COMPLETE chunks. These chunks MUST NOT be bundled with
986 * any other chunk in a packet.
987 *
988 * Section 3.3.7: DATA chunks MUST NOT be bundled with ABORT. Control
989 * chunks (except for INIT, INIT ACK, and SHUTDOWN COMPLETE) MAY be
990 * bundled with an ABORT, but they MUST be placed before the ABORT
991 * in the SCTP packet or they will be ignored by the receiver.
992 */
993 if ((sch->type == SCTP_CID_COOKIE_ECHO) ||
994 (sch->type == SCTP_CID_COOKIE_ACK)) {
995 sch = skb_header_pointer(skb, (ihl + sizeof(sctp_sctphdr_t) +
996 sch->length), sizeof(_sctpch), &_sctpch);
997 if (sch) {
998 if (sch->type == SCTP_CID_ABORT)
999 chunk_type = sch->type;
1000 }
1001 }
1002
1003 event = sctp_events[chunk_type];
1004
1005 /*
1006 * If the direction is IP_VS_DIR_OUTPUT, this event is from server
1007 */
1008 if (direction == IP_VS_DIR_OUTPUT)
1009 event++;
1010 /*
1011 * get next state
1012 */
1013 next_state = sctp_states_table[cp->state][event].next_state;
1014
1015 if (next_state != cp->state) {
1016 struct ip_vs_dest *dest = cp->dest;
1017
1018 IP_VS_DBG_BUF(8, "%s %s %s:%d->"
1019 "%s:%d state: %s->%s conn->refcnt:%d\n",
1020 pp->name,
1021 ((direction == IP_VS_DIR_OUTPUT) ?
1022 "output " : "input "),
1023 IP_VS_DBG_ADDR(cp->af, &cp->daddr),
1024 ntohs(cp->dport),
1025 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
1026 ntohs(cp->cport),
1027 sctp_state_name(cp->state),
1028 sctp_state_name(next_state),
1029 atomic_read(&cp->refcnt));
1030 if (dest) {
1031 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
1032 (next_state != IP_VS_SCTP_S_ESTABLISHED)) {
1033 atomic_dec(&dest->activeconns);
1034 atomic_inc(&dest->inactconns);
1035 cp->flags |= IP_VS_CONN_F_INACTIVE;
1036 } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
1037 (next_state == IP_VS_SCTP_S_ESTABLISHED)) {
1038 atomic_inc(&dest->activeconns);
1039 atomic_dec(&dest->inactconns);
1040 cp->flags &= ~IP_VS_CONN_F_INACTIVE;
1041 }
1042 }
1043 }
1044
1045 cp->timeout = pp->timeout_table[cp->state = next_state];
1046
1047 return 1;
1048}
1049
1050static int
1051sctp_state_transition(struct ip_vs_conn *cp, int direction,
1052 const struct sk_buff *skb, struct ip_vs_protocol *pp)
1053{
1054 int ret = 0;
1055
1056 spin_lock(&cp->lock);
1057 ret = set_sctp_state(pp, cp, direction, skb);
1058 spin_unlock(&cp->lock);
1059
1060 return ret;
1061}
1062
/*
 * Hash table for SCTP application incarnations (ALG helpers),
 * keyed by port.
 */
#define SCTP_APP_TAB_BITS	4
#define SCTP_APP_TAB_SIZE	(1 << SCTP_APP_TAB_BITS)	/* 16 buckets */
#define SCTP_APP_TAB_MASK	(SCTP_APP_TAB_SIZE - 1)

/* Bucket lists of registered ip_vs_app entries; guarded by sctp_app_lock. */
static struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
static DEFINE_SPINLOCK(sctp_app_lock);
1072
1073static inline __u16 sctp_app_hashkey(__be16 port)
1074{
1075 return (((__force u16)port >> SCTP_APP_TAB_BITS) ^ (__force u16)port)
1076 & SCTP_APP_TAB_MASK;
1077}
1078
1079static int sctp_register_app(struct ip_vs_app *inc)
1080{
1081 struct ip_vs_app *i;
1082 __u16 hash;
1083 __be16 port = inc->port;
1084 int ret = 0;
1085
1086 hash = sctp_app_hashkey(port);
1087
1088 spin_lock_bh(&sctp_app_lock);
1089 list_for_each_entry(i, &sctp_apps[hash], p_list) {
1090 if (i->port == port) {
1091 ret = -EEXIST;
1092 goto out;
1093 }
1094 }
1095 list_add(&inc->p_list, &sctp_apps[hash]);
1096 atomic_inc(&ip_vs_protocol_sctp.appcnt);
1097out:
1098 spin_unlock_bh(&sctp_app_lock);
1099
1100 return ret;
1101}
1102
/*
 * Unregister an application incarnation: drop it from the port hash
 * table and decrement the protocol-wide app count, under sctp_app_lock.
 */
static void sctp_unregister_app(struct ip_vs_app *inc)
{
	spin_lock_bh(&sctp_app_lock);
	atomic_dec(&ip_vs_protocol_sctp.appcnt);
	list_del(&inc->p_list);
	spin_unlock_bh(&sctp_app_lock);
}
1110
/*
 * Bind a connection to the application helper registered on its
 * virtual port, if any.  Only NAT (masquerading) connections are
 * bound.
 *
 * Returns 0 when no app matches (or binding is not applicable);
 * otherwise the result of the app's init_conn() callback.
 */
static int sctp_app_conn_bind(struct ip_vs_conn *cp)
{
	int hash;
	struct ip_vs_app *inc;
	int result = 0;

	/* Default binding: bind app only for NAT */
	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
		return 0;
	/* Lookup application incarnations and bind the right one */
	hash = sctp_app_hashkey(cp->vport);

	spin_lock(&sctp_app_lock);
	list_for_each_entry(inc, &sctp_apps[hash], p_list) {
		if (inc->port == cp->vport) {
			/* If the incarnation cannot be pinned, stop
			 * searching; the lock is dropped after the loop. */
			if (unlikely(!ip_vs_app_inc_get(inc)))
				break;
			/* Drop the lock before calling into the app;
			 * the success path leaves via "goto out" and
			 * must NOT unlock again. */
			spin_unlock(&sctp_app_lock);

			IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->"
					"%s:%u to app %s on port %u\n",
					__func__,
					IP_VS_DBG_ADDR(cp->af, &cp->caddr),
					ntohs(cp->cport),
					IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
					ntohs(cp->vport),
					inc->name, ntohs(inc->port));
			cp->app = inc;
			if (inc->init_conn)
				result = inc->init_conn(inc, cp);
			goto out;
		}
	}
	spin_unlock(&sctp_app_lock);
out:
	return result;
}
1148
/*
 * ip_vs_protocol .init callback: initialize the app hash buckets and
 * install the per-state timeout table.
 */
static void ip_vs_sctp_init(struct ip_vs_protocol *pp)
{
	IP_VS_INIT_HASH_TABLE(sctp_apps);
	pp->timeout_table = sctp_timeouts;
}
1154
1155
/* ip_vs_protocol .exit callback: nothing to tear down for SCTP. */
static void ip_vs_sctp_exit(struct ip_vs_protocol *pp)
{
}
1160
/*
 * IPVS protocol handler for SCTP (IPPROTO_SCTP): wires the functions
 * above into the generic ip_vs protocol framework.
 */
struct ip_vs_protocol ip_vs_protocol_sctp = {
	.name = "SCTP",
	.protocol = IPPROTO_SCTP,
	.num_states = IP_VS_SCTP_S_LAST,
	.dont_defrag = 0,
	.appcnt = ATOMIC_INIT(0),
	.init = ip_vs_sctp_init,
	.exit = ip_vs_sctp_exit,
	.register_app = sctp_register_app,
	.unregister_app = sctp_unregister_app,
	.conn_schedule = sctp_conn_schedule,
	.conn_in_get = sctp_conn_in_get,
	.conn_out_get = sctp_conn_out_get,
	.snat_handler = sctp_snat_handler,
	.dnat_handler = sctp_dnat_handler,
	.csum_check = sctp_csum_check,
	.state_name = sctp_state_name,
	.state_transition = sctp_state_transition,
	.app_conn_bind = sctp_app_conn_bind,
	.debug_packet = ip_vs_tcpudp_debug_packet,	/* generic TCP/UDP-style dump */
	.timeout_change = sctp_timeout_change,
	.set_state_timeout = sctp_set_state_timeout,
};
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index e177f0dc208..8fb0ae61676 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -400,6 +400,11 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
400 flags |= IP_VS_CONN_F_INACTIVE; 400 flags |= IP_VS_CONN_F_INACTIVE;
401 else 401 else
402 flags &= ~IP_VS_CONN_F_INACTIVE; 402 flags &= ~IP_VS_CONN_F_INACTIVE;
403 } else if (s->protocol == IPPROTO_SCTP) {
404 if (state != IP_VS_SCTP_S_ESTABLISHED)
405 flags |= IP_VS_CONN_F_INACTIVE;
406 else
407 flags &= ~IP_VS_CONN_F_INACTIVE;
403 } 408 }
404 cp = ip_vs_conn_new(AF_INET, s->protocol, 409 cp = ip_vs_conn_new(AF_INET, s->protocol,
405 (union nf_inet_addr *)&s->caddr, 410 (union nf_inet_addr *)&s->caddr,
@@ -434,6 +439,15 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
434 atomic_dec(&dest->inactconns); 439 atomic_dec(&dest->inactconns);
435 cp->flags &= ~IP_VS_CONN_F_INACTIVE; 440 cp->flags &= ~IP_VS_CONN_F_INACTIVE;
436 } 441 }
442 } else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) &&
443 (cp->state != state)) {
444 dest = cp->dest;
445 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
446 (state != IP_VS_SCTP_S_ESTABLISHED)) {
447 atomic_dec(&dest->activeconns);
448 atomic_inc(&dest->inactconns);
449 cp->flags &= ~IP_VS_CONN_F_INACTIVE;
450 }
437 } 451 }
438 452
439 if (opt) 453 if (opt)
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 30b3189bd29..223b5018c7d 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -311,7 +311,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
311 mtu = dst_mtu(&rt->u.dst); 311 mtu = dst_mtu(&rt->u.dst);
312 if (skb->len > mtu) { 312 if (skb->len > mtu) {
313 dst_release(&rt->u.dst); 313 dst_release(&rt->u.dst);
314 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 314 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
315 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 315 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
316 goto tx_error; 316 goto tx_error;
317 } 317 }
@@ -454,7 +454,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
454 mtu = dst_mtu(&rt->u.dst); 454 mtu = dst_mtu(&rt->u.dst);
455 if (skb->len > mtu) { 455 if (skb->len > mtu) {
456 dst_release(&rt->u.dst); 456 dst_release(&rt->u.dst);
457 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 457 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
458 IP_VS_DBG_RL_PKT(0, pp, skb, 0, 458 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
459 "ip_vs_nat_xmit_v6(): frag needed for"); 459 "ip_vs_nat_xmit_v6(): frag needed for");
460 goto tx_error; 460 goto tx_error;
@@ -672,7 +672,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
672 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); 672 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
673 673
674 if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) { 674 if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
675 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 675 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
676 dst_release(&rt->u.dst); 676 dst_release(&rt->u.dst);
677 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 677 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
678 goto tx_error; 678 goto tx_error;
@@ -814,7 +814,7 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
814 /* MTU checking */ 814 /* MTU checking */
815 mtu = dst_mtu(&rt->u.dst); 815 mtu = dst_mtu(&rt->u.dst);
816 if (skb->len > mtu) { 816 if (skb->len > mtu) {
817 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 817 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
818 dst_release(&rt->u.dst); 818 dst_release(&rt->u.dst);
819 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 819 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
820 goto tx_error; 820 goto tx_error;
@@ -965,7 +965,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
965 mtu = dst_mtu(&rt->u.dst); 965 mtu = dst_mtu(&rt->u.dst);
966 if (skb->len > mtu) { 966 if (skb->len > mtu) {
967 dst_release(&rt->u.dst); 967 dst_release(&rt->u.dst);
968 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 968 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
969 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 969 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
970 goto tx_error; 970 goto tx_error;
971 } 971 }
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 4d79e3c1616..0c9bbe93cc1 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -42,6 +42,7 @@
42#include <net/netfilter/nf_conntrack_extend.h> 42#include <net/netfilter/nf_conntrack_extend.h>
43#include <net/netfilter/nf_conntrack_acct.h> 43#include <net/netfilter/nf_conntrack_acct.h>
44#include <net/netfilter/nf_conntrack_ecache.h> 44#include <net/netfilter/nf_conntrack_ecache.h>
45#include <net/netfilter/nf_conntrack_zones.h>
45#include <net/netfilter/nf_nat.h> 46#include <net/netfilter/nf_nat.h>
46#include <net/netfilter/nf_nat_core.h> 47#include <net/netfilter/nf_nat_core.h>
47 48
@@ -68,7 +69,7 @@ static int nf_conntrack_hash_rnd_initted;
68static unsigned int nf_conntrack_hash_rnd; 69static unsigned int nf_conntrack_hash_rnd;
69 70
70static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, 71static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
71 unsigned int size, unsigned int rnd) 72 u16 zone, unsigned int size, unsigned int rnd)
72{ 73{
73 unsigned int n; 74 unsigned int n;
74 u_int32_t h; 75 u_int32_t h;
@@ -79,16 +80,16 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
79 */ 80 */
80 n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32); 81 n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
81 h = jhash2((u32 *)tuple, n, 82 h = jhash2((u32 *)tuple, n,
82 rnd ^ (((__force __u16)tuple->dst.u.all << 16) | 83 zone ^ rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
83 tuple->dst.protonum)); 84 tuple->dst.protonum));
84 85
85 return ((u64)h * size) >> 32; 86 return ((u64)h * size) >> 32;
86} 87}
87 88
88static inline u_int32_t hash_conntrack(const struct net *net, 89static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
89 const struct nf_conntrack_tuple *tuple) 90 const struct nf_conntrack_tuple *tuple)
90{ 91{
91 return __hash_conntrack(tuple, net->ct.htable_size, 92 return __hash_conntrack(tuple, zone, net->ct.htable_size,
92 nf_conntrack_hash_rnd); 93 nf_conntrack_hash_rnd);
93} 94}
94 95
@@ -292,11 +293,12 @@ static void death_by_timeout(unsigned long ul_conntrack)
292 * - Caller must lock nf_conntrack_lock before calling this function 293 * - Caller must lock nf_conntrack_lock before calling this function
293 */ 294 */
294struct nf_conntrack_tuple_hash * 295struct nf_conntrack_tuple_hash *
295__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple) 296__nf_conntrack_find(struct net *net, u16 zone,
297 const struct nf_conntrack_tuple *tuple)
296{ 298{
297 struct nf_conntrack_tuple_hash *h; 299 struct nf_conntrack_tuple_hash *h;
298 struct hlist_nulls_node *n; 300 struct hlist_nulls_node *n;
299 unsigned int hash = hash_conntrack(net, tuple); 301 unsigned int hash = hash_conntrack(net, zone, tuple);
300 302
301 /* Disable BHs the entire time since we normally need to disable them 303 /* Disable BHs the entire time since we normally need to disable them
302 * at least once for the stats anyway. 304 * at least once for the stats anyway.
@@ -304,7 +306,8 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
304 local_bh_disable(); 306 local_bh_disable();
305begin: 307begin:
306 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { 308 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
307 if (nf_ct_tuple_equal(tuple, &h->tuple)) { 309 if (nf_ct_tuple_equal(tuple, &h->tuple) &&
310 nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
308 NF_CT_STAT_INC(net, found); 311 NF_CT_STAT_INC(net, found);
309 local_bh_enable(); 312 local_bh_enable();
310 return h; 313 return h;
@@ -326,21 +329,23 @@ EXPORT_SYMBOL_GPL(__nf_conntrack_find);
326 329
327/* Find a connection corresponding to a tuple. */ 330/* Find a connection corresponding to a tuple. */
328struct nf_conntrack_tuple_hash * 331struct nf_conntrack_tuple_hash *
329nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple) 332nf_conntrack_find_get(struct net *net, u16 zone,
333 const struct nf_conntrack_tuple *tuple)
330{ 334{
331 struct nf_conntrack_tuple_hash *h; 335 struct nf_conntrack_tuple_hash *h;
332 struct nf_conn *ct; 336 struct nf_conn *ct;
333 337
334 rcu_read_lock(); 338 rcu_read_lock();
335begin: 339begin:
336 h = __nf_conntrack_find(net, tuple); 340 h = __nf_conntrack_find(net, zone, tuple);
337 if (h) { 341 if (h) {
338 ct = nf_ct_tuplehash_to_ctrack(h); 342 ct = nf_ct_tuplehash_to_ctrack(h);
339 if (unlikely(nf_ct_is_dying(ct) || 343 if (unlikely(nf_ct_is_dying(ct) ||
340 !atomic_inc_not_zero(&ct->ct_general.use))) 344 !atomic_inc_not_zero(&ct->ct_general.use)))
341 h = NULL; 345 h = NULL;
342 else { 346 else {
343 if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) { 347 if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
348 nf_ct_zone(ct) != zone)) {
344 nf_ct_put(ct); 349 nf_ct_put(ct);
345 goto begin; 350 goto begin;
346 } 351 }
@@ -368,9 +373,11 @@ void nf_conntrack_hash_insert(struct nf_conn *ct)
368{ 373{
369 struct net *net = nf_ct_net(ct); 374 struct net *net = nf_ct_net(ct);
370 unsigned int hash, repl_hash; 375 unsigned int hash, repl_hash;
376 u16 zone;
371 377
372 hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 378 zone = nf_ct_zone(ct);
373 repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 379 hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
380 repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
374 381
375 __nf_conntrack_hash_insert(ct, hash, repl_hash); 382 __nf_conntrack_hash_insert(ct, hash, repl_hash);
376} 383}
@@ -387,6 +394,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
387 struct hlist_nulls_node *n; 394 struct hlist_nulls_node *n;
388 enum ip_conntrack_info ctinfo; 395 enum ip_conntrack_info ctinfo;
389 struct net *net; 396 struct net *net;
397 u16 zone;
390 398
391 ct = nf_ct_get(skb, &ctinfo); 399 ct = nf_ct_get(skb, &ctinfo);
392 net = nf_ct_net(ct); 400 net = nf_ct_net(ct);
@@ -398,8 +406,9 @@ __nf_conntrack_confirm(struct sk_buff *skb)
398 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) 406 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
399 return NF_ACCEPT; 407 return NF_ACCEPT;
400 408
401 hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 409 zone = nf_ct_zone(ct);
402 repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 410 hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
411 repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
403 412
404 /* We're not in hash table, and we refuse to set up related 413 /* We're not in hash table, and we refuse to set up related
405 connections for unconfirmed conns. But packet copies and 414 connections for unconfirmed conns. But packet copies and
@@ -418,11 +427,13 @@ __nf_conntrack_confirm(struct sk_buff *skb)
418 not in the hash. If there is, we lost race. */ 427 not in the hash. If there is, we lost race. */
419 hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) 428 hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
420 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, 429 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
421 &h->tuple)) 430 &h->tuple) &&
431 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
422 goto out; 432 goto out;
423 hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode) 433 hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
424 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, 434 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
425 &h->tuple)) 435 &h->tuple) &&
436 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
426 goto out; 437 goto out;
427 438
428 /* Remove from unconfirmed list */ 439 /* Remove from unconfirmed list */
@@ -469,15 +480,19 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
469 struct net *net = nf_ct_net(ignored_conntrack); 480 struct net *net = nf_ct_net(ignored_conntrack);
470 struct nf_conntrack_tuple_hash *h; 481 struct nf_conntrack_tuple_hash *h;
471 struct hlist_nulls_node *n; 482 struct hlist_nulls_node *n;
472 unsigned int hash = hash_conntrack(net, tuple); 483 struct nf_conn *ct;
484 u16 zone = nf_ct_zone(ignored_conntrack);
485 unsigned int hash = hash_conntrack(net, zone, tuple);
473 486
474 /* Disable BHs the entire time since we need to disable them at 487 /* Disable BHs the entire time since we need to disable them at
475 * least once for the stats anyway. 488 * least once for the stats anyway.
476 */ 489 */
477 rcu_read_lock_bh(); 490 rcu_read_lock_bh();
478 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { 491 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
479 if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack && 492 ct = nf_ct_tuplehash_to_ctrack(h);
480 nf_ct_tuple_equal(tuple, &h->tuple)) { 493 if (ct != ignored_conntrack &&
494 nf_ct_tuple_equal(tuple, &h->tuple) &&
495 nf_ct_zone(ct) == zone) {
481 NF_CT_STAT_INC(net, found); 496 NF_CT_STAT_INC(net, found);
482 rcu_read_unlock_bh(); 497 rcu_read_unlock_bh();
483 return 1; 498 return 1;
@@ -540,7 +555,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
540 return dropped; 555 return dropped;
541} 556}
542 557
543struct nf_conn *nf_conntrack_alloc(struct net *net, 558struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
544 const struct nf_conntrack_tuple *orig, 559 const struct nf_conntrack_tuple *orig,
545 const struct nf_conntrack_tuple *repl, 560 const struct nf_conntrack_tuple *repl,
546 gfp_t gfp) 561 gfp_t gfp)
@@ -558,7 +573,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
558 573
559 if (nf_conntrack_max && 574 if (nf_conntrack_max &&
560 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { 575 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
561 unsigned int hash = hash_conntrack(net, orig); 576 unsigned int hash = hash_conntrack(net, zone, orig);
562 if (!early_drop(net, hash)) { 577 if (!early_drop(net, hash)) {
563 atomic_dec(&net->ct.count); 578 atomic_dec(&net->ct.count);
564 if (net_ratelimit()) 579 if (net_ratelimit())
@@ -595,13 +610,28 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
595#ifdef CONFIG_NET_NS 610#ifdef CONFIG_NET_NS
596 ct->ct_net = net; 611 ct->ct_net = net;
597#endif 612#endif
598 613#ifdef CONFIG_NF_CONNTRACK_ZONES
614 if (zone) {
615 struct nf_conntrack_zone *nf_ct_zone;
616
617 nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
618 if (!nf_ct_zone)
619 goto out_free;
620 nf_ct_zone->id = zone;
621 }
622#endif
599 /* 623 /*
600 * changes to lookup keys must be done before setting refcnt to 1 624 * changes to lookup keys must be done before setting refcnt to 1
601 */ 625 */
602 smp_wmb(); 626 smp_wmb();
603 atomic_set(&ct->ct_general.use, 1); 627 atomic_set(&ct->ct_general.use, 1);
604 return ct; 628 return ct;
629
630#ifdef CONFIG_NF_CONNTRACK_ZONES
631out_free:
632 kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
633 return ERR_PTR(-ENOMEM);
634#endif
605} 635}
606EXPORT_SYMBOL_GPL(nf_conntrack_alloc); 636EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
607 637
@@ -619,7 +649,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_free);
619/* Allocate a new conntrack: we return -ENOMEM if classification 649/* Allocate a new conntrack: we return -ENOMEM if classification
620 failed due to stress. Otherwise it really is unclassifiable. */ 650 failed due to stress. Otherwise it really is unclassifiable. */
621static struct nf_conntrack_tuple_hash * 651static struct nf_conntrack_tuple_hash *
622init_conntrack(struct net *net, 652init_conntrack(struct net *net, struct nf_conn *tmpl,
623 const struct nf_conntrack_tuple *tuple, 653 const struct nf_conntrack_tuple *tuple,
624 struct nf_conntrack_l3proto *l3proto, 654 struct nf_conntrack_l3proto *l3proto,
625 struct nf_conntrack_l4proto *l4proto, 655 struct nf_conntrack_l4proto *l4proto,
@@ -629,14 +659,16 @@ init_conntrack(struct net *net,
629 struct nf_conn *ct; 659 struct nf_conn *ct;
630 struct nf_conn_help *help; 660 struct nf_conn_help *help;
631 struct nf_conntrack_tuple repl_tuple; 661 struct nf_conntrack_tuple repl_tuple;
662 struct nf_conntrack_ecache *ecache;
632 struct nf_conntrack_expect *exp; 663 struct nf_conntrack_expect *exp;
664 u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
633 665
634 if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) { 666 if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
635 pr_debug("Can't invert tuple.\n"); 667 pr_debug("Can't invert tuple.\n");
636 return NULL; 668 return NULL;
637 } 669 }
638 670
639 ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC); 671 ct = nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC);
640 if (IS_ERR(ct)) { 672 if (IS_ERR(ct)) {
641 pr_debug("Can't allocate conntrack.\n"); 673 pr_debug("Can't allocate conntrack.\n");
642 return (struct nf_conntrack_tuple_hash *)ct; 674 return (struct nf_conntrack_tuple_hash *)ct;
@@ -649,10 +681,14 @@ init_conntrack(struct net *net,
649 } 681 }
650 682
651 nf_ct_acct_ext_add(ct, GFP_ATOMIC); 683 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
652 nf_ct_ecache_ext_add(ct, GFP_ATOMIC); 684
685 ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
686 nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
687 ecache ? ecache->expmask : 0,
688 GFP_ATOMIC);
653 689
654 spin_lock_bh(&nf_conntrack_lock); 690 spin_lock_bh(&nf_conntrack_lock);
655 exp = nf_ct_find_expectation(net, tuple); 691 exp = nf_ct_find_expectation(net, zone, tuple);
656 if (exp) { 692 if (exp) {
657 pr_debug("conntrack: expectation arrives ct=%p exp=%p\n", 693 pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
658 ct, exp); 694 ct, exp);
@@ -674,7 +710,7 @@ init_conntrack(struct net *net,
674 nf_conntrack_get(&ct->master->ct_general); 710 nf_conntrack_get(&ct->master->ct_general);
675 NF_CT_STAT_INC(net, expect_new); 711 NF_CT_STAT_INC(net, expect_new);
676 } else { 712 } else {
677 __nf_ct_try_assign_helper(ct, GFP_ATOMIC); 713 __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
678 NF_CT_STAT_INC(net, new); 714 NF_CT_STAT_INC(net, new);
679 } 715 }
680 716
@@ -695,7 +731,7 @@ init_conntrack(struct net *net,
695 731
696/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */ 732/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
697static inline struct nf_conn * 733static inline struct nf_conn *
698resolve_normal_ct(struct net *net, 734resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
699 struct sk_buff *skb, 735 struct sk_buff *skb,
700 unsigned int dataoff, 736 unsigned int dataoff,
701 u_int16_t l3num, 737 u_int16_t l3num,
@@ -708,6 +744,7 @@ resolve_normal_ct(struct net *net,
708 struct nf_conntrack_tuple tuple; 744 struct nf_conntrack_tuple tuple;
709 struct nf_conntrack_tuple_hash *h; 745 struct nf_conntrack_tuple_hash *h;
710 struct nf_conn *ct; 746 struct nf_conn *ct;
747 u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
711 748
712 if (!nf_ct_get_tuple(skb, skb_network_offset(skb), 749 if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
713 dataoff, l3num, protonum, &tuple, l3proto, 750 dataoff, l3num, protonum, &tuple, l3proto,
@@ -717,9 +754,10 @@ resolve_normal_ct(struct net *net,
717 } 754 }
718 755
719 /* look for tuple match */ 756 /* look for tuple match */
720 h = nf_conntrack_find_get(net, &tuple); 757 h = nf_conntrack_find_get(net, zone, &tuple);
721 if (!h) { 758 if (!h) {
722 h = init_conntrack(net, &tuple, l3proto, l4proto, skb, dataoff); 759 h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
760 skb, dataoff);
723 if (!h) 761 if (!h)
724 return NULL; 762 return NULL;
725 if (IS_ERR(h)) 763 if (IS_ERR(h))
@@ -756,7 +794,7 @@ unsigned int
756nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, 794nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
757 struct sk_buff *skb) 795 struct sk_buff *skb)
758{ 796{
759 struct nf_conn *ct; 797 struct nf_conn *ct, *tmpl = NULL;
760 enum ip_conntrack_info ctinfo; 798 enum ip_conntrack_info ctinfo;
761 struct nf_conntrack_l3proto *l3proto; 799 struct nf_conntrack_l3proto *l3proto;
762 struct nf_conntrack_l4proto *l4proto; 800 struct nf_conntrack_l4proto *l4proto;
@@ -765,10 +803,14 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
765 int set_reply = 0; 803 int set_reply = 0;
766 int ret; 804 int ret;
767 805
768 /* Previously seen (loopback or untracked)? Ignore. */
769 if (skb->nfct) { 806 if (skb->nfct) {
770 NF_CT_STAT_INC_ATOMIC(net, ignore); 807 /* Previously seen (loopback or untracked)? Ignore. */
771 return NF_ACCEPT; 808 tmpl = (struct nf_conn *)skb->nfct;
809 if (!nf_ct_is_template(tmpl)) {
810 NF_CT_STAT_INC_ATOMIC(net, ignore);
811 return NF_ACCEPT;
812 }
813 skb->nfct = NULL;
772 } 814 }
773 815
774 /* rcu_read_lock()ed by nf_hook_slow */ 816 /* rcu_read_lock()ed by nf_hook_slow */
@@ -779,7 +821,8 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
779 pr_debug("not prepared to track yet or error occured\n"); 821 pr_debug("not prepared to track yet or error occured\n");
780 NF_CT_STAT_INC_ATOMIC(net, error); 822 NF_CT_STAT_INC_ATOMIC(net, error);
781 NF_CT_STAT_INC_ATOMIC(net, invalid); 823 NF_CT_STAT_INC_ATOMIC(net, invalid);
782 return -ret; 824 ret = -ret;
825 goto out;
783 } 826 }
784 827
785 l4proto = __nf_ct_l4proto_find(pf, protonum); 828 l4proto = __nf_ct_l4proto_find(pf, protonum);
@@ -788,26 +831,30 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
788 * inverse of the return code tells to the netfilter 831 * inverse of the return code tells to the netfilter
789 * core what to do with the packet. */ 832 * core what to do with the packet. */
790 if (l4proto->error != NULL) { 833 if (l4proto->error != NULL) {
791 ret = l4proto->error(net, skb, dataoff, &ctinfo, pf, hooknum); 834 ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
835 pf, hooknum);
792 if (ret <= 0) { 836 if (ret <= 0) {
793 NF_CT_STAT_INC_ATOMIC(net, error); 837 NF_CT_STAT_INC_ATOMIC(net, error);
794 NF_CT_STAT_INC_ATOMIC(net, invalid); 838 NF_CT_STAT_INC_ATOMIC(net, invalid);
795 return -ret; 839 ret = -ret;
840 goto out;
796 } 841 }
797 } 842 }
798 843
799 ct = resolve_normal_ct(net, skb, dataoff, pf, protonum, 844 ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
800 l3proto, l4proto, &set_reply, &ctinfo); 845 l3proto, l4proto, &set_reply, &ctinfo);
801 if (!ct) { 846 if (!ct) {
802 /* Not valid part of a connection */ 847 /* Not valid part of a connection */
803 NF_CT_STAT_INC_ATOMIC(net, invalid); 848 NF_CT_STAT_INC_ATOMIC(net, invalid);
804 return NF_ACCEPT; 849 ret = NF_ACCEPT;
850 goto out;
805 } 851 }
806 852
807 if (IS_ERR(ct)) { 853 if (IS_ERR(ct)) {
808 /* Too stressed to deal. */ 854 /* Too stressed to deal. */
809 NF_CT_STAT_INC_ATOMIC(net, drop); 855 NF_CT_STAT_INC_ATOMIC(net, drop);
810 return NF_DROP; 856 ret = NF_DROP;
857 goto out;
811 } 858 }
812 859
813 NF_CT_ASSERT(skb->nfct); 860 NF_CT_ASSERT(skb->nfct);
@@ -822,11 +869,15 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
822 NF_CT_STAT_INC_ATOMIC(net, invalid); 869 NF_CT_STAT_INC_ATOMIC(net, invalid);
823 if (ret == -NF_DROP) 870 if (ret == -NF_DROP)
824 NF_CT_STAT_INC_ATOMIC(net, drop); 871 NF_CT_STAT_INC_ATOMIC(net, drop);
825 return -ret; 872 ret = -ret;
873 goto out;
826 } 874 }
827 875
828 if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status)) 876 if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
829 nf_conntrack_event_cache(IPCT_STATUS, ct); 877 nf_conntrack_event_cache(IPCT_REPLY, ct);
878out:
879 if (tmpl)
880 nf_ct_put(tmpl);
830 881
831 return ret; 882 return ret;
832} 883}
@@ -865,7 +916,7 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
865 return; 916 return;
866 917
867 rcu_read_lock(); 918 rcu_read_lock();
868 __nf_ct_try_assign_helper(ct, GFP_ATOMIC); 919 __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
869 rcu_read_unlock(); 920 rcu_read_unlock();
870} 921}
871EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply); 922EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
@@ -939,6 +990,14 @@ bool __nf_ct_kill_acct(struct nf_conn *ct,
939} 990}
940EXPORT_SYMBOL_GPL(__nf_ct_kill_acct); 991EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
941 992
993#ifdef CONFIG_NF_CONNTRACK_ZONES
994static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
995 .len = sizeof(struct nf_conntrack_zone),
996 .align = __alignof__(struct nf_conntrack_zone),
997 .id = NF_CT_EXT_ZONE,
998};
999#endif
1000
942#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) 1001#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
943 1002
944#include <linux/netfilter/nfnetlink.h> 1003#include <linux/netfilter/nfnetlink.h>
@@ -1120,6 +1179,9 @@ static void nf_conntrack_cleanup_init_net(void)
1120 1179
1121 nf_conntrack_helper_fini(); 1180 nf_conntrack_helper_fini();
1122 nf_conntrack_proto_fini(); 1181 nf_conntrack_proto_fini();
1182#ifdef CONFIG_NF_CONNTRACK_ZONES
1183 nf_ct_extend_unregister(&nf_ct_zone_extend);
1184#endif
1123} 1185}
1124 1186
1125static void nf_conntrack_cleanup_net(struct net *net) 1187static void nf_conntrack_cleanup_net(struct net *net)
@@ -1195,6 +1257,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1195 unsigned int hashsize, old_size; 1257 unsigned int hashsize, old_size;
1196 struct hlist_nulls_head *hash, *old_hash; 1258 struct hlist_nulls_head *hash, *old_hash;
1197 struct nf_conntrack_tuple_hash *h; 1259 struct nf_conntrack_tuple_hash *h;
1260 struct nf_conn *ct;
1198 1261
1199 if (current->nsproxy->net_ns != &init_net) 1262 if (current->nsproxy->net_ns != &init_net)
1200 return -EOPNOTSUPP; 1263 return -EOPNOTSUPP;
@@ -1221,8 +1284,10 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1221 while (!hlist_nulls_empty(&init_net.ct.hash[i])) { 1284 while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
1222 h = hlist_nulls_entry(init_net.ct.hash[i].first, 1285 h = hlist_nulls_entry(init_net.ct.hash[i].first,
1223 struct nf_conntrack_tuple_hash, hnnode); 1286 struct nf_conntrack_tuple_hash, hnnode);
1287 ct = nf_ct_tuplehash_to_ctrack(h);
1224 hlist_nulls_del_rcu(&h->hnnode); 1288 hlist_nulls_del_rcu(&h->hnnode);
1225 bucket = __hash_conntrack(&h->tuple, hashsize, 1289 bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
1290 hashsize,
1226 nf_conntrack_hash_rnd); 1291 nf_conntrack_hash_rnd);
1227 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); 1292 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1228 } 1293 }
@@ -1280,6 +1345,11 @@ static int nf_conntrack_init_init_net(void)
1280 if (ret < 0) 1345 if (ret < 0)
1281 goto err_helper; 1346 goto err_helper;
1282 1347
1348#ifdef CONFIG_NF_CONNTRACK_ZONES
1349 ret = nf_ct_extend_register(&nf_ct_zone_extend);
1350 if (ret < 0)
1351 goto err_extend;
1352#endif
1283 /* Set up fake conntrack: to never be deleted, not in any hashes */ 1353 /* Set up fake conntrack: to never be deleted, not in any hashes */
1284#ifdef CONFIG_NET_NS 1354#ifdef CONFIG_NET_NS
1285 nf_conntrack_untracked.ct_net = &init_net; 1355 nf_conntrack_untracked.ct_net = &init_net;
@@ -1290,6 +1360,10 @@ static int nf_conntrack_init_init_net(void)
1290 1360
1291 return 0; 1361 return 0;
1292 1362
1363#ifdef CONFIG_NF_CONNTRACK_ZONES
1364err_extend:
1365 nf_conntrack_helper_fini();
1366#endif
1293err_helper: 1367err_helper:
1294 nf_conntrack_proto_fini(); 1368 nf_conntrack_proto_fini();
1295err_proto: 1369err_proto:
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 2f25ff61098..acb29ccaa41 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -27,6 +27,7 @@
27#include <net/netfilter/nf_conntrack_expect.h> 27#include <net/netfilter/nf_conntrack_expect.h>
28#include <net/netfilter/nf_conntrack_helper.h> 28#include <net/netfilter/nf_conntrack_helper.h>
29#include <net/netfilter/nf_conntrack_tuple.h> 29#include <net/netfilter/nf_conntrack_tuple.h>
30#include <net/netfilter/nf_conntrack_zones.h>
30 31
31unsigned int nf_ct_expect_hsize __read_mostly; 32unsigned int nf_ct_expect_hsize __read_mostly;
32EXPORT_SYMBOL_GPL(nf_ct_expect_hsize); 33EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);
@@ -84,7 +85,8 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple
84} 85}
85 86
86struct nf_conntrack_expect * 87struct nf_conntrack_expect *
87__nf_ct_expect_find(struct net *net, const struct nf_conntrack_tuple *tuple) 88__nf_ct_expect_find(struct net *net, u16 zone,
89 const struct nf_conntrack_tuple *tuple)
88{ 90{
89 struct nf_conntrack_expect *i; 91 struct nf_conntrack_expect *i;
90 struct hlist_node *n; 92 struct hlist_node *n;
@@ -95,7 +97,8 @@ __nf_ct_expect_find(struct net *net, const struct nf_conntrack_tuple *tuple)
95 97
96 h = nf_ct_expect_dst_hash(tuple); 98 h = nf_ct_expect_dst_hash(tuple);
97 hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) { 99 hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
98 if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) 100 if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
101 nf_ct_zone(i->master) == zone)
99 return i; 102 return i;
100 } 103 }
101 return NULL; 104 return NULL;
@@ -104,12 +107,13 @@ EXPORT_SYMBOL_GPL(__nf_ct_expect_find);
104 107
105/* Just find a expectation corresponding to a tuple. */ 108/* Just find a expectation corresponding to a tuple. */
106struct nf_conntrack_expect * 109struct nf_conntrack_expect *
107nf_ct_expect_find_get(struct net *net, const struct nf_conntrack_tuple *tuple) 110nf_ct_expect_find_get(struct net *net, u16 zone,
111 const struct nf_conntrack_tuple *tuple)
108{ 112{
109 struct nf_conntrack_expect *i; 113 struct nf_conntrack_expect *i;
110 114
111 rcu_read_lock(); 115 rcu_read_lock();
112 i = __nf_ct_expect_find(net, tuple); 116 i = __nf_ct_expect_find(net, zone, tuple);
113 if (i && !atomic_inc_not_zero(&i->use)) 117 if (i && !atomic_inc_not_zero(&i->use))
114 i = NULL; 118 i = NULL;
115 rcu_read_unlock(); 119 rcu_read_unlock();
@@ -121,7 +125,8 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
121/* If an expectation for this connection is found, it gets delete from 125/* If an expectation for this connection is found, it gets delete from
122 * global list then returned. */ 126 * global list then returned. */
123struct nf_conntrack_expect * 127struct nf_conntrack_expect *
124nf_ct_find_expectation(struct net *net, const struct nf_conntrack_tuple *tuple) 128nf_ct_find_expectation(struct net *net, u16 zone,
129 const struct nf_conntrack_tuple *tuple)
125{ 130{
126 struct nf_conntrack_expect *i, *exp = NULL; 131 struct nf_conntrack_expect *i, *exp = NULL;
127 struct hlist_node *n; 132 struct hlist_node *n;
@@ -133,7 +138,8 @@ nf_ct_find_expectation(struct net *net, const struct nf_conntrack_tuple *tuple)
133 h = nf_ct_expect_dst_hash(tuple); 138 h = nf_ct_expect_dst_hash(tuple);
134 hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) { 139 hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
135 if (!(i->flags & NF_CT_EXPECT_INACTIVE) && 140 if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
136 nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) { 141 nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
142 nf_ct_zone(i->master) == zone) {
137 exp = i; 143 exp = i;
138 break; 144 break;
139 } 145 }
@@ -204,7 +210,8 @@ static inline int expect_matches(const struct nf_conntrack_expect *a,
204{ 210{
205 return a->master == b->master && a->class == b->class && 211 return a->master == b->master && a->class == b->class &&
206 nf_ct_tuple_equal(&a->tuple, &b->tuple) && 212 nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
207 nf_ct_tuple_mask_equal(&a->mask, &b->mask); 213 nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
214 nf_ct_zone(a->master) == nf_ct_zone(b->master);
208} 215}
209 216
210/* Generally a bad idea to call this: could have matched already. */ 217/* Generally a bad idea to call this: could have matched already. */
@@ -232,7 +239,6 @@ struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
232 239
233 new->master = me; 240 new->master = me;
234 atomic_set(&new->use, 1); 241 atomic_set(&new->use, 1);
235 INIT_RCU_HEAD(&new->rcu);
236 return new; 242 return new;
237} 243}
238EXPORT_SYMBOL_GPL(nf_ct_expect_alloc); 244EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
@@ -500,6 +506,7 @@ static void exp_seq_stop(struct seq_file *seq, void *v)
500static int exp_seq_show(struct seq_file *s, void *v) 506static int exp_seq_show(struct seq_file *s, void *v)
501{ 507{
502 struct nf_conntrack_expect *expect; 508 struct nf_conntrack_expect *expect;
509 struct nf_conntrack_helper *helper;
503 struct hlist_node *n = v; 510 struct hlist_node *n = v;
504 char *delim = ""; 511 char *delim = "";
505 512
@@ -525,6 +532,14 @@ static int exp_seq_show(struct seq_file *s, void *v)
525 if (expect->flags & NF_CT_EXPECT_INACTIVE) 532 if (expect->flags & NF_CT_EXPECT_INACTIVE)
526 seq_printf(s, "%sINACTIVE", delim); 533 seq_printf(s, "%sINACTIVE", delim);
527 534
535 helper = rcu_dereference(nfct_help(expect->master)->helper);
536 if (helper) {
537 seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
538 if (helper->expect_policy[expect->class].name)
539 seq_printf(s, "/%s",
540 helper->expect_policy[expect->class].name);
541 }
542
528 return seq_putc(s, '\n'); 543 return seq_putc(s, '\n');
529} 544}
530 545
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index fef95be334b..fdc8fb4ae10 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -59,7 +59,6 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
59 if (!*ext) 59 if (!*ext)
60 return NULL; 60 return NULL;
61 61
62 INIT_RCU_HEAD(&(*ext)->rcu);
63 (*ext)->offset[id] = off; 62 (*ext)->offset[id] = off;
64 (*ext)->len = len; 63 (*ext)->len = len;
65 64
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 66369490230..a1c8dd917e1 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -29,6 +29,7 @@
29#include <net/netfilter/nf_conntrack_expect.h> 29#include <net/netfilter/nf_conntrack_expect.h>
30#include <net/netfilter/nf_conntrack_ecache.h> 30#include <net/netfilter/nf_conntrack_ecache.h>
31#include <net/netfilter/nf_conntrack_helper.h> 31#include <net/netfilter/nf_conntrack_helper.h>
32#include <net/netfilter/nf_conntrack_zones.h>
32#include <linux/netfilter/nf_conntrack_h323.h> 33#include <linux/netfilter/nf_conntrack_h323.h>
33 34
34/* Parameters */ 35/* Parameters */
@@ -1216,7 +1217,7 @@ static struct nf_conntrack_expect *find_expect(struct nf_conn *ct,
1216 tuple.dst.u.tcp.port = port; 1217 tuple.dst.u.tcp.port = port;
1217 tuple.dst.protonum = IPPROTO_TCP; 1218 tuple.dst.protonum = IPPROTO_TCP;
1218 1219
1219 exp = __nf_ct_expect_find(net, &tuple); 1220 exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple);
1220 if (exp && exp->master == ct) 1221 if (exp && exp->master == ct)
1221 return exp; 1222 return exp;
1222 return NULL; 1223 return NULL;
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 4b1a56bd074..4509fa6726f 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -65,7 +65,7 @@ __nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
65} 65}
66 66
67struct nf_conntrack_helper * 67struct nf_conntrack_helper *
68__nf_conntrack_helper_find_byname(const char *name) 68__nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum)
69{ 69{
70 struct nf_conntrack_helper *h; 70 struct nf_conntrack_helper *h;
71 struct hlist_node *n; 71 struct hlist_node *n;
@@ -73,13 +73,34 @@ __nf_conntrack_helper_find_byname(const char *name)
73 73
74 for (i = 0; i < nf_ct_helper_hsize; i++) { 74 for (i = 0; i < nf_ct_helper_hsize; i++) {
75 hlist_for_each_entry_rcu(h, n, &nf_ct_helper_hash[i], hnode) { 75 hlist_for_each_entry_rcu(h, n, &nf_ct_helper_hash[i], hnode) {
76 if (!strcmp(h->name, name)) 76 if (!strcmp(h->name, name) &&
77 h->tuple.src.l3num == l3num &&
78 h->tuple.dst.protonum == protonum)
77 return h; 79 return h;
78 } 80 }
79 } 81 }
80 return NULL; 82 return NULL;
81} 83}
82EXPORT_SYMBOL_GPL(__nf_conntrack_helper_find_byname); 84EXPORT_SYMBOL_GPL(__nf_conntrack_helper_find);
85
86struct nf_conntrack_helper *
87nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
88{
89 struct nf_conntrack_helper *h;
90
91 h = __nf_conntrack_helper_find(name, l3num, protonum);
92#ifdef CONFIG_MODULES
93 if (h == NULL) {
94 if (request_module("nfct-helper-%s", name) == 0)
95 h = __nf_conntrack_helper_find(name, l3num, protonum);
96 }
97#endif
98 if (h != NULL && !try_module_get(h->me))
99 h = NULL;
100
101 return h;
102}
103EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);
83 104
84struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp) 105struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp)
85{ 106{
@@ -94,13 +115,22 @@ struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp)
94} 115}
95EXPORT_SYMBOL_GPL(nf_ct_helper_ext_add); 116EXPORT_SYMBOL_GPL(nf_ct_helper_ext_add);
96 117
97int __nf_ct_try_assign_helper(struct nf_conn *ct, gfp_t flags) 118int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
119 gfp_t flags)
98{ 120{
121 struct nf_conntrack_helper *helper = NULL;
122 struct nf_conn_help *help;
99 int ret = 0; 123 int ret = 0;
100 struct nf_conntrack_helper *helper;
101 struct nf_conn_help *help = nfct_help(ct);
102 124
103 helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 125 if (tmpl != NULL) {
126 help = nfct_help(tmpl);
127 if (help != NULL)
128 helper = help->helper;
129 }
130
131 help = nfct_help(ct);
132 if (helper == NULL)
133 helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
104 if (helper == NULL) { 134 if (helper == NULL) {
105 if (help) 135 if (help)
106 rcu_assign_pointer(help->helper, NULL); 136 rcu_assign_pointer(help->helper, NULL);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 0ffe689dfe9..2b2af631d2b 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -30,6 +30,7 @@
30 30
31#include <linux/netfilter.h> 31#include <linux/netfilter.h>
32#include <net/netlink.h> 32#include <net/netlink.h>
33#include <net/sock.h>
33#include <net/netfilter/nf_conntrack.h> 34#include <net/netfilter/nf_conntrack.h>
34#include <net/netfilter/nf_conntrack_core.h> 35#include <net/netfilter/nf_conntrack_core.h>
35#include <net/netfilter/nf_conntrack_expect.h> 36#include <net/netfilter/nf_conntrack_expect.h>
@@ -38,6 +39,7 @@
38#include <net/netfilter/nf_conntrack_l4proto.h> 39#include <net/netfilter/nf_conntrack_l4proto.h>
39#include <net/netfilter/nf_conntrack_tuple.h> 40#include <net/netfilter/nf_conntrack_tuple.h>
40#include <net/netfilter/nf_conntrack_acct.h> 41#include <net/netfilter/nf_conntrack_acct.h>
42#include <net/netfilter/nf_conntrack_zones.h>
41#ifdef CONFIG_NF_NAT_NEEDED 43#ifdef CONFIG_NF_NAT_NEEDED
42#include <net/netfilter/nf_nat_core.h> 44#include <net/netfilter/nf_nat_core.h>
43#include <net/netfilter/nf_nat_protocol.h> 45#include <net/netfilter/nf_nat_protocol.h>
@@ -378,6 +380,9 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
378 goto nla_put_failure; 380 goto nla_put_failure;
379 nla_nest_end(skb, nest_parms); 381 nla_nest_end(skb, nest_parms);
380 382
383 if (nf_ct_zone(ct))
384 NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));
385
381 if (ctnetlink_dump_status(skb, ct) < 0 || 386 if (ctnetlink_dump_status(skb, ct) < 0 ||
382 ctnetlink_dump_timeout(skb, ct) < 0 || 387 ctnetlink_dump_timeout(skb, ct) < 0 ||
383 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || 388 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
@@ -456,6 +461,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
456static int 461static int
457ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) 462ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
458{ 463{
464 struct net *net;
459 struct nlmsghdr *nlh; 465 struct nlmsghdr *nlh;
460 struct nfgenmsg *nfmsg; 466 struct nfgenmsg *nfmsg;
461 struct nlattr *nest_parms; 467 struct nlattr *nest_parms;
@@ -482,7 +488,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
482 } else 488 } else
483 return 0; 489 return 0;
484 490
485 if (!item->report && !nfnetlink_has_listeners(group)) 491 net = nf_ct_net(ct);
492 if (!item->report && !nfnetlink_has_listeners(net, group))
486 return 0; 493 return 0;
487 494
488 skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC); 495 skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
@@ -514,6 +521,9 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
514 goto nla_put_failure; 521 goto nla_put_failure;
515 nla_nest_end(skb, nest_parms); 522 nla_nest_end(skb, nest_parms);
516 523
524 if (nf_ct_zone(ct))
525 NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));
526
517 if (ctnetlink_dump_id(skb, ct) < 0) 527 if (ctnetlink_dump_id(skb, ct) < 0)
518 goto nla_put_failure; 528 goto nla_put_failure;
519 529
@@ -559,7 +569,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
559 rcu_read_unlock(); 569 rcu_read_unlock();
560 570
561 nlmsg_end(skb, nlh); 571 nlmsg_end(skb, nlh);
562 err = nfnetlink_send(skb, item->pid, group, item->report, GFP_ATOMIC); 572 err = nfnetlink_send(skb, net, item->pid, group, item->report,
573 GFP_ATOMIC);
563 if (err == -ENOBUFS || err == -EAGAIN) 574 if (err == -ENOBUFS || err == -EAGAIN)
564 return -ENOBUFS; 575 return -ENOBUFS;
565 576
@@ -571,7 +582,7 @@ nla_put_failure:
571nlmsg_failure: 582nlmsg_failure:
572 kfree_skb(skb); 583 kfree_skb(skb);
573errout: 584errout:
574 nfnetlink_set_err(0, group, -ENOBUFS); 585 nfnetlink_set_err(net, 0, group, -ENOBUFS);
575 return 0; 586 return 0;
576} 587}
577#endif /* CONFIG_NF_CONNTRACK_EVENTS */ 588#endif /* CONFIG_NF_CONNTRACK_EVENTS */
@@ -586,6 +597,7 @@ static int ctnetlink_done(struct netlink_callback *cb)
586static int 597static int
587ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) 598ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
588{ 599{
600 struct net *net = sock_net(skb->sk);
589 struct nf_conn *ct, *last; 601 struct nf_conn *ct, *last;
590 struct nf_conntrack_tuple_hash *h; 602 struct nf_conntrack_tuple_hash *h;
591 struct hlist_nulls_node *n; 603 struct hlist_nulls_node *n;
@@ -594,9 +606,9 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
594 606
595 rcu_read_lock(); 607 rcu_read_lock();
596 last = (struct nf_conn *)cb->args[1]; 608 last = (struct nf_conn *)cb->args[1];
597 for (; cb->args[0] < init_net.ct.htable_size; cb->args[0]++) { 609 for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
598restart: 610restart:
599 hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]], 611 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
600 hnnode) { 612 hnnode) {
601 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) 613 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
602 continue; 614 continue;
@@ -703,6 +715,11 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
703 return ret; 715 return ret;
704} 716}
705 717
718static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
719 [CTA_TUPLE_IP] = { .type = NLA_NESTED },
720 [CTA_TUPLE_PROTO] = { .type = NLA_NESTED },
721};
722
706static int 723static int
707ctnetlink_parse_tuple(const struct nlattr * const cda[], 724ctnetlink_parse_tuple(const struct nlattr * const cda[],
708 struct nf_conntrack_tuple *tuple, 725 struct nf_conntrack_tuple *tuple,
@@ -713,7 +730,7 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
713 730
714 memset(tuple, 0, sizeof(*tuple)); 731 memset(tuple, 0, sizeof(*tuple));
715 732
716 nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], NULL); 733 nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy);
717 734
718 if (!tb[CTA_TUPLE_IP]) 735 if (!tb[CTA_TUPLE_IP])
719 return -EINVAL; 736 return -EINVAL;
@@ -740,12 +757,31 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
740 return 0; 757 return 0;
741} 758}
742 759
760static int
761ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone)
762{
763 if (attr)
764#ifdef CONFIG_NF_CONNTRACK_ZONES
765 *zone = ntohs(nla_get_be16(attr));
766#else
767 return -EOPNOTSUPP;
768#endif
769 else
770 *zone = 0;
771
772 return 0;
773}
774
775static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
776 [CTA_HELP_NAME] = { .type = NLA_NUL_STRING },
777};
778
743static inline int 779static inline int
744ctnetlink_parse_help(const struct nlattr *attr, char **helper_name) 780ctnetlink_parse_help(const struct nlattr *attr, char **helper_name)
745{ 781{
746 struct nlattr *tb[CTA_HELP_MAX+1]; 782 struct nlattr *tb[CTA_HELP_MAX+1];
747 783
748 nla_parse_nested(tb, CTA_HELP_MAX, attr, NULL); 784 nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy);
749 785
750 if (!tb[CTA_HELP_NAME]) 786 if (!tb[CTA_HELP_NAME])
751 return -EINVAL; 787 return -EINVAL;
@@ -756,11 +792,18 @@ ctnetlink_parse_help(const struct nlattr *attr, char **helper_name)
756} 792}
757 793
758static const struct nla_policy ct_nla_policy[CTA_MAX+1] = { 794static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
795 [CTA_TUPLE_ORIG] = { .type = NLA_NESTED },
796 [CTA_TUPLE_REPLY] = { .type = NLA_NESTED },
759 [CTA_STATUS] = { .type = NLA_U32 }, 797 [CTA_STATUS] = { .type = NLA_U32 },
798 [CTA_PROTOINFO] = { .type = NLA_NESTED },
799 [CTA_HELP] = { .type = NLA_NESTED },
800 [CTA_NAT_SRC] = { .type = NLA_NESTED },
760 [CTA_TIMEOUT] = { .type = NLA_U32 }, 801 [CTA_TIMEOUT] = { .type = NLA_U32 },
761 [CTA_MARK] = { .type = NLA_U32 }, 802 [CTA_MARK] = { .type = NLA_U32 },
762 [CTA_USE] = { .type = NLA_U32 },
763 [CTA_ID] = { .type = NLA_U32 }, 803 [CTA_ID] = { .type = NLA_U32 },
804 [CTA_NAT_DST] = { .type = NLA_NESTED },
805 [CTA_TUPLE_MASTER] = { .type = NLA_NESTED },
806 [CTA_ZONE] = { .type = NLA_U16 },
764}; 807};
765 808
766static int 809static int
@@ -768,12 +811,18 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
768 const struct nlmsghdr *nlh, 811 const struct nlmsghdr *nlh,
769 const struct nlattr * const cda[]) 812 const struct nlattr * const cda[])
770{ 813{
814 struct net *net = sock_net(ctnl);
771 struct nf_conntrack_tuple_hash *h; 815 struct nf_conntrack_tuple_hash *h;
772 struct nf_conntrack_tuple tuple; 816 struct nf_conntrack_tuple tuple;
773 struct nf_conn *ct; 817 struct nf_conn *ct;
774 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 818 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
775 u_int8_t u3 = nfmsg->nfgen_family; 819 u_int8_t u3 = nfmsg->nfgen_family;
776 int err = 0; 820 u16 zone;
821 int err;
822
823 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
824 if (err < 0)
825 return err;
777 826
778 if (cda[CTA_TUPLE_ORIG]) 827 if (cda[CTA_TUPLE_ORIG])
779 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3); 828 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
@@ -781,7 +830,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
781 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3); 830 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
782 else { 831 else {
783 /* Flush the whole table */ 832 /* Flush the whole table */
784 nf_conntrack_flush_report(&init_net, 833 nf_conntrack_flush_report(net,
785 NETLINK_CB(skb).pid, 834 NETLINK_CB(skb).pid,
786 nlmsg_report(nlh)); 835 nlmsg_report(nlh));
787 return 0; 836 return 0;
@@ -790,7 +839,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
790 if (err < 0) 839 if (err < 0)
791 return err; 840 return err;
792 841
793 h = nf_conntrack_find_get(&init_net, &tuple); 842 h = nf_conntrack_find_get(net, zone, &tuple);
794 if (!h) 843 if (!h)
795 return -ENOENT; 844 return -ENOENT;
796 845
@@ -828,18 +877,24 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
828 const struct nlmsghdr *nlh, 877 const struct nlmsghdr *nlh,
829 const struct nlattr * const cda[]) 878 const struct nlattr * const cda[])
830{ 879{
880 struct net *net = sock_net(ctnl);
831 struct nf_conntrack_tuple_hash *h; 881 struct nf_conntrack_tuple_hash *h;
832 struct nf_conntrack_tuple tuple; 882 struct nf_conntrack_tuple tuple;
833 struct nf_conn *ct; 883 struct nf_conn *ct;
834 struct sk_buff *skb2 = NULL; 884 struct sk_buff *skb2 = NULL;
835 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 885 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
836 u_int8_t u3 = nfmsg->nfgen_family; 886 u_int8_t u3 = nfmsg->nfgen_family;
837 int err = 0; 887 u16 zone;
888 int err;
838 889
839 if (nlh->nlmsg_flags & NLM_F_DUMP) 890 if (nlh->nlmsg_flags & NLM_F_DUMP)
840 return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table, 891 return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
841 ctnetlink_done); 892 ctnetlink_done);
842 893
894 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
895 if (err < 0)
896 return err;
897
843 if (cda[CTA_TUPLE_ORIG]) 898 if (cda[CTA_TUPLE_ORIG])
844 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3); 899 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
845 else if (cda[CTA_TUPLE_REPLY]) 900 else if (cda[CTA_TUPLE_REPLY])
@@ -850,7 +905,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
850 if (err < 0) 905 if (err < 0)
851 return err; 906 return err;
852 907
853 h = nf_conntrack_find_get(&init_net, &tuple); 908 h = nf_conntrack_find_get(net, zone, &tuple);
854 if (!h) 909 if (!h)
855 return -ENOENT; 910 return -ENOENT;
856 911
@@ -994,7 +1049,8 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
994 return 0; 1049 return 0;
995 } 1050 }
996 1051
997 helper = __nf_conntrack_helper_find_byname(helpname); 1052 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1053 nf_ct_protonum(ct));
998 if (helper == NULL) { 1054 if (helper == NULL) {
999#ifdef CONFIG_MODULES 1055#ifdef CONFIG_MODULES
1000 spin_unlock_bh(&nf_conntrack_lock); 1056 spin_unlock_bh(&nf_conntrack_lock);
@@ -1005,7 +1061,8 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
1005 } 1061 }
1006 1062
1007 spin_lock_bh(&nf_conntrack_lock); 1063 spin_lock_bh(&nf_conntrack_lock);
1008 helper = __nf_conntrack_helper_find_byname(helpname); 1064 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1065 nf_ct_protonum(ct));
1009 if (helper) 1066 if (helper)
1010 return -EAGAIN; 1067 return -EAGAIN;
1011#endif 1068#endif
@@ -1020,9 +1077,8 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
1020 /* need to zero data of old helper */ 1077 /* need to zero data of old helper */
1021 memset(&help->help, 0, sizeof(help->help)); 1078 memset(&help->help, 0, sizeof(help->help));
1022 } else { 1079 } else {
1023 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); 1080 /* we cannot set a helper for an existing conntrack */
1024 if (help == NULL) 1081 return -EOPNOTSUPP;
1025 return -ENOMEM;
1026 } 1082 }
1027 1083
1028 rcu_assign_pointer(help->helper, helper); 1084 rcu_assign_pointer(help->helper, helper);
@@ -1044,6 +1100,12 @@ ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[])
1044 return 0; 1100 return 0;
1045} 1101}
1046 1102
1103static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
1104 [CTA_PROTOINFO_TCP] = { .type = NLA_NESTED },
1105 [CTA_PROTOINFO_DCCP] = { .type = NLA_NESTED },
1106 [CTA_PROTOINFO_SCTP] = { .type = NLA_NESTED },
1107};
1108
1047static inline int 1109static inline int
1048ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[]) 1110ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[])
1049{ 1111{
@@ -1052,7 +1114,7 @@ ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[]
1052 struct nf_conntrack_l4proto *l4proto; 1114 struct nf_conntrack_l4proto *l4proto;
1053 int err = 0; 1115 int err = 0;
1054 1116
1055 nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, NULL); 1117 nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy);
1056 1118
1057 rcu_read_lock(); 1119 rcu_read_lock();
1058 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); 1120 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
@@ -1064,12 +1126,18 @@ ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[]
1064} 1126}
1065 1127
1066#ifdef CONFIG_NF_NAT_NEEDED 1128#ifdef CONFIG_NF_NAT_NEEDED
1129static const struct nla_policy nat_seq_policy[CTA_NAT_SEQ_MAX+1] = {
1130 [CTA_NAT_SEQ_CORRECTION_POS] = { .type = NLA_U32 },
1131 [CTA_NAT_SEQ_OFFSET_BEFORE] = { .type = NLA_U32 },
1132 [CTA_NAT_SEQ_OFFSET_AFTER] = { .type = NLA_U32 },
1133};
1134
1067static inline int 1135static inline int
1068change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr) 1136change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr)
1069{ 1137{
1070 struct nlattr *cda[CTA_NAT_SEQ_MAX+1]; 1138 struct nlattr *cda[CTA_NAT_SEQ_MAX+1];
1071 1139
1072 nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, NULL); 1140 nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, nat_seq_policy);
1073 1141
1074 if (!cda[CTA_NAT_SEQ_CORRECTION_POS]) 1142 if (!cda[CTA_NAT_SEQ_CORRECTION_POS])
1075 return -EINVAL; 1143 return -EINVAL;
@@ -1175,7 +1243,8 @@ ctnetlink_change_conntrack(struct nf_conn *ct,
1175} 1243}
1176 1244
1177static struct nf_conn * 1245static struct nf_conn *
1178ctnetlink_create_conntrack(const struct nlattr * const cda[], 1246ctnetlink_create_conntrack(struct net *net, u16 zone,
1247 const struct nlattr * const cda[],
1179 struct nf_conntrack_tuple *otuple, 1248 struct nf_conntrack_tuple *otuple,
1180 struct nf_conntrack_tuple *rtuple, 1249 struct nf_conntrack_tuple *rtuple,
1181 u8 u3) 1250 u8 u3)
@@ -1184,7 +1253,7 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
1184 int err = -EINVAL; 1253 int err = -EINVAL;
1185 struct nf_conntrack_helper *helper; 1254 struct nf_conntrack_helper *helper;
1186 1255
1187 ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_ATOMIC); 1256 ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
1188 if (IS_ERR(ct)) 1257 if (IS_ERR(ct))
1189 return ERR_PTR(-ENOMEM); 1258 return ERR_PTR(-ENOMEM);
1190 1259
@@ -1193,7 +1262,6 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
1193 ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT])); 1262 ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
1194 1263
1195 ct->timeout.expires = jiffies + ct->timeout.expires * HZ; 1264 ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
1196 ct->status |= IPS_CONFIRMED;
1197 1265
1198 rcu_read_lock(); 1266 rcu_read_lock();
1199 if (cda[CTA_HELP]) { 1267 if (cda[CTA_HELP]) {
@@ -1203,7 +1271,8 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
1203 if (err < 0) 1271 if (err < 0)
1204 goto err2; 1272 goto err2;
1205 1273
1206 helper = __nf_conntrack_helper_find_byname(helpname); 1274 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1275 nf_ct_protonum(ct));
1207 if (helper == NULL) { 1276 if (helper == NULL) {
1208 rcu_read_unlock(); 1277 rcu_read_unlock();
1209#ifdef CONFIG_MODULES 1278#ifdef CONFIG_MODULES
@@ -1213,7 +1282,9 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
1213 } 1282 }
1214 1283
1215 rcu_read_lock(); 1284 rcu_read_lock();
1216 helper = __nf_conntrack_helper_find_byname(helpname); 1285 helper = __nf_conntrack_helper_find(helpname,
1286 nf_ct_l3num(ct),
1287 nf_ct_protonum(ct));
1217 if (helper) { 1288 if (helper) {
1218 err = -EAGAIN; 1289 err = -EAGAIN;
1219 goto err2; 1290 goto err2;
@@ -1236,19 +1307,24 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
1236 } 1307 }
1237 } else { 1308 } else {
1238 /* try an implicit helper assignation */ 1309 /* try an implicit helper assignation */
1239 err = __nf_ct_try_assign_helper(ct, GFP_ATOMIC); 1310 err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1240 if (err < 0) 1311 if (err < 0)
1241 goto err2; 1312 goto err2;
1242 } 1313 }
1243 1314
1244 if (cda[CTA_STATUS]) { 1315 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
1245 err = ctnetlink_change_status(ct, cda); 1316 err = ctnetlink_change_nat(ct, cda);
1246 if (err < 0) 1317 if (err < 0)
1247 goto err2; 1318 goto err2;
1248 } 1319 }
1249 1320
1250 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { 1321 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1251 err = ctnetlink_change_nat(ct, cda); 1322 nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
1323 /* we must add conntrack extensions before confirmation. */
1324 ct->status |= IPS_CONFIRMED;
1325
1326 if (cda[CTA_STATUS]) {
1327 err = ctnetlink_change_status(ct, cda);
1252 if (err < 0) 1328 if (err < 0)
1253 goto err2; 1329 goto err2;
1254 } 1330 }
@@ -1267,9 +1343,6 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
1267 goto err2; 1343 goto err2;
1268 } 1344 }
1269 1345
1270 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1271 nf_ct_ecache_ext_add(ct, GFP_ATOMIC);
1272
1273#if defined(CONFIG_NF_CONNTRACK_MARK) 1346#if defined(CONFIG_NF_CONNTRACK_MARK)
1274 if (cda[CTA_MARK]) 1347 if (cda[CTA_MARK])
1275 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK])); 1348 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
@@ -1285,7 +1358,7 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
1285 if (err < 0) 1358 if (err < 0)
1286 goto err2; 1359 goto err2;
1287 1360
1288 master_h = nf_conntrack_find_get(&init_net, &master); 1361 master_h = nf_conntrack_find_get(net, zone, &master);
1289 if (master_h == NULL) { 1362 if (master_h == NULL) {
1290 err = -ENOENT; 1363 err = -ENOENT;
1291 goto err2; 1364 goto err2;
@@ -1313,11 +1386,17 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1313 const struct nlmsghdr *nlh, 1386 const struct nlmsghdr *nlh,
1314 const struct nlattr * const cda[]) 1387 const struct nlattr * const cda[])
1315{ 1388{
1389 struct net *net = sock_net(ctnl);
1316 struct nf_conntrack_tuple otuple, rtuple; 1390 struct nf_conntrack_tuple otuple, rtuple;
1317 struct nf_conntrack_tuple_hash *h = NULL; 1391 struct nf_conntrack_tuple_hash *h = NULL;
1318 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1392 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1319 u_int8_t u3 = nfmsg->nfgen_family; 1393 u_int8_t u3 = nfmsg->nfgen_family;
1320 int err = 0; 1394 u16 zone;
1395 int err;
1396
1397 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1398 if (err < 0)
1399 return err;
1321 1400
1322 if (cda[CTA_TUPLE_ORIG]) { 1401 if (cda[CTA_TUPLE_ORIG]) {
1323 err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3); 1402 err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
@@ -1333,9 +1412,9 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1333 1412
1334 spin_lock_bh(&nf_conntrack_lock); 1413 spin_lock_bh(&nf_conntrack_lock);
1335 if (cda[CTA_TUPLE_ORIG]) 1414 if (cda[CTA_TUPLE_ORIG])
1336 h = __nf_conntrack_find(&init_net, &otuple); 1415 h = __nf_conntrack_find(net, zone, &otuple);
1337 else if (cda[CTA_TUPLE_REPLY]) 1416 else if (cda[CTA_TUPLE_REPLY])
1338 h = __nf_conntrack_find(&init_net, &rtuple); 1417 h = __nf_conntrack_find(net, zone, &rtuple);
1339 1418
1340 if (h == NULL) { 1419 if (h == NULL) {
1341 err = -ENOENT; 1420 err = -ENOENT;
@@ -1343,7 +1422,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1343 struct nf_conn *ct; 1422 struct nf_conn *ct;
1344 enum ip_conntrack_events events; 1423 enum ip_conntrack_events events;
1345 1424
1346 ct = ctnetlink_create_conntrack(cda, &otuple, 1425 ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
1347 &rtuple, u3); 1426 &rtuple, u3);
1348 if (IS_ERR(ct)) { 1427 if (IS_ERR(ct)) {
1349 err = PTR_ERR(ct); 1428 err = PTR_ERR(ct);
@@ -1357,7 +1436,8 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1357 else 1436 else
1358 events = IPCT_NEW; 1437 events = IPCT_NEW;
1359 1438
1360 nf_conntrack_eventmask_report((1 << IPCT_STATUS) | 1439 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1440 (1 << IPCT_ASSURED) |
1361 (1 << IPCT_HELPER) | 1441 (1 << IPCT_HELPER) |
1362 (1 << IPCT_PROTOINFO) | 1442 (1 << IPCT_PROTOINFO) |
1363 (1 << IPCT_NATSEQADJ) | 1443 (1 << IPCT_NATSEQADJ) |
@@ -1382,7 +1462,8 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1382 if (err == 0) { 1462 if (err == 0) {
1383 nf_conntrack_get(&ct->ct_general); 1463 nf_conntrack_get(&ct->ct_general);
1384 spin_unlock_bh(&nf_conntrack_lock); 1464 spin_unlock_bh(&nf_conntrack_lock);
1385 nf_conntrack_eventmask_report((1 << IPCT_STATUS) | 1465 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1466 (1 << IPCT_ASSURED) |
1386 (1 << IPCT_HELPER) | 1467 (1 << IPCT_HELPER) |
1387 (1 << IPCT_PROTOINFO) | 1468 (1 << IPCT_PROTOINFO) |
1388 (1 << IPCT_NATSEQADJ) | 1469 (1 << IPCT_NATSEQADJ) |
@@ -1469,6 +1550,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
1469 const struct nf_conntrack_expect *exp) 1550 const struct nf_conntrack_expect *exp)
1470{ 1551{
1471 struct nf_conn *master = exp->master; 1552 struct nf_conn *master = exp->master;
1553 struct nf_conntrack_helper *helper;
1472 long timeout = (exp->timeout.expires - jiffies) / HZ; 1554 long timeout = (exp->timeout.expires - jiffies) / HZ;
1473 1555
1474 if (timeout < 0) 1556 if (timeout < 0)
@@ -1485,6 +1567,9 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
1485 1567
1486 NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)); 1568 NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout));
1487 NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)); 1569 NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp));
1570 helper = rcu_dereference(nfct_help(master)->helper);
1571 if (helper)
1572 NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name);
1488 1573
1489 return 0; 1574 return 0;
1490 1575
@@ -1526,9 +1611,10 @@ nla_put_failure:
1526static int 1611static int
1527ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item) 1612ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
1528{ 1613{
1614 struct nf_conntrack_expect *exp = item->exp;
1615 struct net *net = nf_ct_exp_net(exp);
1529 struct nlmsghdr *nlh; 1616 struct nlmsghdr *nlh;
1530 struct nfgenmsg *nfmsg; 1617 struct nfgenmsg *nfmsg;
1531 struct nf_conntrack_expect *exp = item->exp;
1532 struct sk_buff *skb; 1618 struct sk_buff *skb;
1533 unsigned int type; 1619 unsigned int type;
1534 int flags = 0; 1620 int flags = 0;
@@ -1540,7 +1626,7 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
1540 return 0; 1626 return 0;
1541 1627
1542 if (!item->report && 1628 if (!item->report &&
1543 !nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW)) 1629 !nfnetlink_has_listeners(net, NFNLGRP_CONNTRACK_EXP_NEW))
1544 return 0; 1630 return 0;
1545 1631
1546 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); 1632 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
@@ -1563,7 +1649,7 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
1563 rcu_read_unlock(); 1649 rcu_read_unlock();
1564 1650
1565 nlmsg_end(skb, nlh); 1651 nlmsg_end(skb, nlh);
1566 nfnetlink_send(skb, item->pid, NFNLGRP_CONNTRACK_EXP_NEW, 1652 nfnetlink_send(skb, net, item->pid, NFNLGRP_CONNTRACK_EXP_NEW,
1567 item->report, GFP_ATOMIC); 1653 item->report, GFP_ATOMIC);
1568 return 0; 1654 return 0;
1569 1655
@@ -1573,7 +1659,7 @@ nla_put_failure:
1573nlmsg_failure: 1659nlmsg_failure:
1574 kfree_skb(skb); 1660 kfree_skb(skb);
1575errout: 1661errout:
1576 nfnetlink_set_err(0, 0, -ENOBUFS); 1662 nfnetlink_set_err(net, 0, 0, -ENOBUFS);
1577 return 0; 1663 return 0;
1578} 1664}
1579#endif 1665#endif
@@ -1587,7 +1673,7 @@ static int ctnetlink_exp_done(struct netlink_callback *cb)
1587static int 1673static int
1588ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb) 1674ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
1589{ 1675{
1590 struct net *net = &init_net; 1676 struct net *net = sock_net(skb->sk);
1591 struct nf_conntrack_expect *exp, *last; 1677 struct nf_conntrack_expect *exp, *last;
1592 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 1678 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
1593 struct hlist_node *n; 1679 struct hlist_node *n;
@@ -1631,8 +1717,12 @@ out:
1631} 1717}
1632 1718
1633static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = { 1719static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
1720 [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
1721 [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
1722 [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
1634 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 }, 1723 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
1635 [CTA_EXPECT_ID] = { .type = NLA_U32 }, 1724 [CTA_EXPECT_ID] = { .type = NLA_U32 },
1725 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING },
1636}; 1726};
1637 1727
1638static int 1728static int
@@ -1640,12 +1730,14 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1640 const struct nlmsghdr *nlh, 1730 const struct nlmsghdr *nlh,
1641 const struct nlattr * const cda[]) 1731 const struct nlattr * const cda[])
1642{ 1732{
1733 struct net *net = sock_net(ctnl);
1643 struct nf_conntrack_tuple tuple; 1734 struct nf_conntrack_tuple tuple;
1644 struct nf_conntrack_expect *exp; 1735 struct nf_conntrack_expect *exp;
1645 struct sk_buff *skb2; 1736 struct sk_buff *skb2;
1646 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1737 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1647 u_int8_t u3 = nfmsg->nfgen_family; 1738 u_int8_t u3 = nfmsg->nfgen_family;
1648 int err = 0; 1739 u16 zone;
1740 int err;
1649 1741
1650 if (nlh->nlmsg_flags & NLM_F_DUMP) { 1742 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1651 return netlink_dump_start(ctnl, skb, nlh, 1743 return netlink_dump_start(ctnl, skb, nlh,
@@ -1653,6 +1745,10 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1653 ctnetlink_exp_done); 1745 ctnetlink_exp_done);
1654 } 1746 }
1655 1747
1748 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
1749 if (err < 0)
1750 return err;
1751
1656 if (cda[CTA_EXPECT_MASTER]) 1752 if (cda[CTA_EXPECT_MASTER])
1657 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3); 1753 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
1658 else 1754 else
@@ -1661,7 +1757,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1661 if (err < 0) 1757 if (err < 0)
1662 return err; 1758 return err;
1663 1759
1664 exp = nf_ct_expect_find_get(&init_net, &tuple); 1760 exp = nf_ct_expect_find_get(net, zone, &tuple);
1665 if (!exp) 1761 if (!exp)
1666 return -ENOENT; 1762 return -ENOENT;
1667 1763
@@ -1701,23 +1797,28 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1701 const struct nlmsghdr *nlh, 1797 const struct nlmsghdr *nlh,
1702 const struct nlattr * const cda[]) 1798 const struct nlattr * const cda[])
1703{ 1799{
1800 struct net *net = sock_net(ctnl);
1704 struct nf_conntrack_expect *exp; 1801 struct nf_conntrack_expect *exp;
1705 struct nf_conntrack_tuple tuple; 1802 struct nf_conntrack_tuple tuple;
1706 struct nf_conntrack_helper *h;
1707 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1803 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1708 struct hlist_node *n, *next; 1804 struct hlist_node *n, *next;
1709 u_int8_t u3 = nfmsg->nfgen_family; 1805 u_int8_t u3 = nfmsg->nfgen_family;
1710 unsigned int i; 1806 unsigned int i;
1807 u16 zone;
1711 int err; 1808 int err;
1712 1809
1713 if (cda[CTA_EXPECT_TUPLE]) { 1810 if (cda[CTA_EXPECT_TUPLE]) {
1714 /* delete a single expect by tuple */ 1811 /* delete a single expect by tuple */
1812 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
1813 if (err < 0)
1814 return err;
1815
1715 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); 1816 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
1716 if (err < 0) 1817 if (err < 0)
1717 return err; 1818 return err;
1718 1819
1719 /* bump usage count to 2 */ 1820 /* bump usage count to 2 */
1720 exp = nf_ct_expect_find_get(&init_net, &tuple); 1821 exp = nf_ct_expect_find_get(net, zone, &tuple);
1721 if (!exp) 1822 if (!exp)
1722 return -ENOENT; 1823 return -ENOENT;
1723 1824
@@ -1740,18 +1841,13 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1740 1841
1741 /* delete all expectations for this helper */ 1842 /* delete all expectations for this helper */
1742 spin_lock_bh(&nf_conntrack_lock); 1843 spin_lock_bh(&nf_conntrack_lock);
1743 h = __nf_conntrack_helper_find_byname(name);
1744 if (!h) {
1745 spin_unlock_bh(&nf_conntrack_lock);
1746 return -EOPNOTSUPP;
1747 }
1748 for (i = 0; i < nf_ct_expect_hsize; i++) { 1844 for (i = 0; i < nf_ct_expect_hsize; i++) {
1749 hlist_for_each_entry_safe(exp, n, next, 1845 hlist_for_each_entry_safe(exp, n, next,
1750 &init_net.ct.expect_hash[i], 1846 &net->ct.expect_hash[i],
1751 hnode) { 1847 hnode) {
1752 m_help = nfct_help(exp->master); 1848 m_help = nfct_help(exp->master);
1753 if (m_help->helper == h 1849 if (!strcmp(m_help->helper->name, name) &&
1754 && del_timer(&exp->timeout)) { 1850 del_timer(&exp->timeout)) {
1755 nf_ct_unlink_expect(exp); 1851 nf_ct_unlink_expect(exp);
1756 nf_ct_expect_put(exp); 1852 nf_ct_expect_put(exp);
1757 } 1853 }
@@ -1763,7 +1859,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1763 spin_lock_bh(&nf_conntrack_lock); 1859 spin_lock_bh(&nf_conntrack_lock);
1764 for (i = 0; i < nf_ct_expect_hsize; i++) { 1860 for (i = 0; i < nf_ct_expect_hsize; i++) {
1765 hlist_for_each_entry_safe(exp, n, next, 1861 hlist_for_each_entry_safe(exp, n, next,
1766 &init_net.ct.expect_hash[i], 1862 &net->ct.expect_hash[i],
1767 hnode) { 1863 hnode) {
1768 if (del_timer(&exp->timeout)) { 1864 if (del_timer(&exp->timeout)) {
1769 nf_ct_unlink_expect(exp); 1865 nf_ct_unlink_expect(exp);
@@ -1784,7 +1880,9 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
1784} 1880}
1785 1881
1786static int 1882static int
1787ctnetlink_create_expect(const struct nlattr * const cda[], u_int8_t u3, 1883ctnetlink_create_expect(struct net *net, u16 zone,
1884 const struct nlattr * const cda[],
1885 u_int8_t u3,
1788 u32 pid, int report) 1886 u32 pid, int report)
1789{ 1887{
1790 struct nf_conntrack_tuple tuple, mask, master_tuple; 1888 struct nf_conntrack_tuple tuple, mask, master_tuple;
@@ -1806,7 +1904,7 @@ ctnetlink_create_expect(const struct nlattr * const cda[], u_int8_t u3,
1806 return err; 1904 return err;
1807 1905
1808 /* Look for master conntrack of this expectation */ 1906 /* Look for master conntrack of this expectation */
1809 h = nf_conntrack_find_get(&init_net, &master_tuple); 1907 h = nf_conntrack_find_get(net, zone, &master_tuple);
1810 if (!h) 1908 if (!h)
1811 return -ENOENT; 1909 return -ENOENT;
1812 ct = nf_ct_tuplehash_to_ctrack(h); 1910 ct = nf_ct_tuplehash_to_ctrack(h);
@@ -1846,29 +1944,35 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
1846 const struct nlmsghdr *nlh, 1944 const struct nlmsghdr *nlh,
1847 const struct nlattr * const cda[]) 1945 const struct nlattr * const cda[])
1848{ 1946{
1947 struct net *net = sock_net(ctnl);
1849 struct nf_conntrack_tuple tuple; 1948 struct nf_conntrack_tuple tuple;
1850 struct nf_conntrack_expect *exp; 1949 struct nf_conntrack_expect *exp;
1851 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 1950 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1852 u_int8_t u3 = nfmsg->nfgen_family; 1951 u_int8_t u3 = nfmsg->nfgen_family;
1853 int err = 0; 1952 u16 zone;
1953 int err;
1854 1954
1855 if (!cda[CTA_EXPECT_TUPLE] 1955 if (!cda[CTA_EXPECT_TUPLE]
1856 || !cda[CTA_EXPECT_MASK] 1956 || !cda[CTA_EXPECT_MASK]
1857 || !cda[CTA_EXPECT_MASTER]) 1957 || !cda[CTA_EXPECT_MASTER])
1858 return -EINVAL; 1958 return -EINVAL;
1859 1959
1960 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
1961 if (err < 0)
1962 return err;
1963
1860 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); 1964 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
1861 if (err < 0) 1965 if (err < 0)
1862 return err; 1966 return err;
1863 1967
1864 spin_lock_bh(&nf_conntrack_lock); 1968 spin_lock_bh(&nf_conntrack_lock);
1865 exp = __nf_ct_expect_find(&init_net, &tuple); 1969 exp = __nf_ct_expect_find(net, zone, &tuple);
1866 1970
1867 if (!exp) { 1971 if (!exp) {
1868 spin_unlock_bh(&nf_conntrack_lock); 1972 spin_unlock_bh(&nf_conntrack_lock);
1869 err = -ENOENT; 1973 err = -ENOENT;
1870 if (nlh->nlmsg_flags & NLM_F_CREATE) { 1974 if (nlh->nlmsg_flags & NLM_F_CREATE) {
1871 err = ctnetlink_create_expect(cda, 1975 err = ctnetlink_create_expect(net, zone, cda,
1872 u3, 1976 u3,
1873 NETLINK_CB(skb).pid, 1977 NETLINK_CB(skb).pid,
1874 nlmsg_report(nlh)); 1978 nlmsg_report(nlh));
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 3807ac7faf4..088944824e1 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -28,6 +28,7 @@
28#include <net/netfilter/nf_conntrack.h> 28#include <net/netfilter/nf_conntrack.h>
29#include <net/netfilter/nf_conntrack_core.h> 29#include <net/netfilter/nf_conntrack_core.h>
30#include <net/netfilter/nf_conntrack_helper.h> 30#include <net/netfilter/nf_conntrack_helper.h>
31#include <net/netfilter/nf_conntrack_zones.h>
31#include <linux/netfilter/nf_conntrack_proto_gre.h> 32#include <linux/netfilter/nf_conntrack_proto_gre.h>
32#include <linux/netfilter/nf_conntrack_pptp.h> 33#include <linux/netfilter/nf_conntrack_pptp.h>
33 34
@@ -123,7 +124,7 @@ static void pptp_expectfn(struct nf_conn *ct,
123 pr_debug("trying to unexpect other dir: "); 124 pr_debug("trying to unexpect other dir: ");
124 nf_ct_dump_tuple(&inv_t); 125 nf_ct_dump_tuple(&inv_t);
125 126
126 exp_other = nf_ct_expect_find_get(net, &inv_t); 127 exp_other = nf_ct_expect_find_get(net, nf_ct_zone(ct), &inv_t);
127 if (exp_other) { 128 if (exp_other) {
128 /* delete other expectation. */ 129 /* delete other expectation. */
129 pr_debug("found\n"); 130 pr_debug("found\n");
@@ -136,17 +137,18 @@ static void pptp_expectfn(struct nf_conn *ct,
136 rcu_read_unlock(); 137 rcu_read_unlock();
137} 138}
138 139
139static int destroy_sibling_or_exp(struct net *net, 140static int destroy_sibling_or_exp(struct net *net, struct nf_conn *ct,
140 const struct nf_conntrack_tuple *t) 141 const struct nf_conntrack_tuple *t)
141{ 142{
142 const struct nf_conntrack_tuple_hash *h; 143 const struct nf_conntrack_tuple_hash *h;
143 struct nf_conntrack_expect *exp; 144 struct nf_conntrack_expect *exp;
144 struct nf_conn *sibling; 145 struct nf_conn *sibling;
146 u16 zone = nf_ct_zone(ct);
145 147
146 pr_debug("trying to timeout ct or exp for tuple "); 148 pr_debug("trying to timeout ct or exp for tuple ");
147 nf_ct_dump_tuple(t); 149 nf_ct_dump_tuple(t);
148 150
149 h = nf_conntrack_find_get(net, t); 151 h = nf_conntrack_find_get(net, zone, t);
150 if (h) { 152 if (h) {
151 sibling = nf_ct_tuplehash_to_ctrack(h); 153 sibling = nf_ct_tuplehash_to_ctrack(h);
152 pr_debug("setting timeout of conntrack %p to 0\n", sibling); 154 pr_debug("setting timeout of conntrack %p to 0\n", sibling);
@@ -157,7 +159,7 @@ static int destroy_sibling_or_exp(struct net *net,
157 nf_ct_put(sibling); 159 nf_ct_put(sibling);
158 return 1; 160 return 1;
159 } else { 161 } else {
160 exp = nf_ct_expect_find_get(net, t); 162 exp = nf_ct_expect_find_get(net, zone, t);
161 if (exp) { 163 if (exp) {
162 pr_debug("unexpect_related of expect %p\n", exp); 164 pr_debug("unexpect_related of expect %p\n", exp);
163 nf_ct_unexpect_related(exp); 165 nf_ct_unexpect_related(exp);
@@ -182,7 +184,7 @@ static void pptp_destroy_siblings(struct nf_conn *ct)
182 t.dst.protonum = IPPROTO_GRE; 184 t.dst.protonum = IPPROTO_GRE;
183 t.src.u.gre.key = help->help.ct_pptp_info.pns_call_id; 185 t.src.u.gre.key = help->help.ct_pptp_info.pns_call_id;
184 t.dst.u.gre.key = help->help.ct_pptp_info.pac_call_id; 186 t.dst.u.gre.key = help->help.ct_pptp_info.pac_call_id;
185 if (!destroy_sibling_or_exp(net, &t)) 187 if (!destroy_sibling_or_exp(net, ct, &t))
186 pr_debug("failed to timeout original pns->pac ct/exp\n"); 188 pr_debug("failed to timeout original pns->pac ct/exp\n");
187 189
188 /* try reply (pac->pns) tuple */ 190 /* try reply (pac->pns) tuple */
@@ -190,7 +192,7 @@ static void pptp_destroy_siblings(struct nf_conn *ct)
190 t.dst.protonum = IPPROTO_GRE; 192 t.dst.protonum = IPPROTO_GRE;
191 t.src.u.gre.key = help->help.ct_pptp_info.pac_call_id; 193 t.src.u.gre.key = help->help.ct_pptp_info.pac_call_id;
192 t.dst.u.gre.key = help->help.ct_pptp_info.pns_call_id; 194 t.dst.u.gre.key = help->help.ct_pptp_info.pns_call_id;
193 if (!destroy_sibling_or_exp(net, &t)) 195 if (!destroy_sibling_or_exp(net, ct, &t))
194 pr_debug("failed to timeout reply pac->pns ct/exp\n"); 196 pr_debug("failed to timeout reply pac->pns ct/exp\n");
195} 197}
196 198
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index dd375500dcc..9a281554937 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -561,8 +561,9 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
561 return NF_ACCEPT; 561 return NF_ACCEPT;
562} 562}
563 563
564static int dccp_error(struct net *net, struct sk_buff *skb, 564static int dccp_error(struct net *net, struct nf_conn *tmpl,
565 unsigned int dataoff, enum ip_conntrack_info *ctinfo, 565 struct sk_buff *skb, unsigned int dataoff,
566 enum ip_conntrack_info *ctinfo,
566 u_int8_t pf, unsigned int hooknum) 567 u_int8_t pf, unsigned int hooknum)
567{ 568{
568 struct dccp_hdr _dh, *dh; 569 struct dccp_hdr _dh, *dh;
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index c99cfba64dd..d899b1a6994 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -241,7 +241,7 @@ static int gre_packet(struct nf_conn *ct,
241 ct->proto.gre.stream_timeout); 241 ct->proto.gre.stream_timeout);
242 /* Also, more likely to be important, and not a probe. */ 242 /* Also, more likely to be important, and not a probe. */
243 set_bit(IPS_ASSURED_BIT, &ct->status); 243 set_bit(IPS_ASSURED_BIT, &ct->status);
244 nf_conntrack_event_cache(IPCT_STATUS, ct); 244 nf_conntrack_event_cache(IPCT_ASSURED, ct);
245 } else 245 } else
246 nf_ct_refresh_acct(ct, ctinfo, skb, 246 nf_ct_refresh_acct(ct, ctinfo, skb,
247 ct->proto.gre.timeout); 247 ct->proto.gre.timeout);
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index f9d930f8027..b68ff15ed97 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -377,7 +377,7 @@ static int sctp_packet(struct nf_conn *ct,
377 new_state == SCTP_CONNTRACK_ESTABLISHED) { 377 new_state == SCTP_CONNTRACK_ESTABLISHED) {
378 pr_debug("Setting assured bit\n"); 378 pr_debug("Setting assured bit\n");
379 set_bit(IPS_ASSURED_BIT, &ct->status); 379 set_bit(IPS_ASSURED_BIT, &ct->status);
380 nf_conntrack_event_cache(IPCT_STATUS, ct); 380 nf_conntrack_event_cache(IPCT_ASSURED, ct);
381 } 381 }
382 382
383 return NF_ACCEPT; 383 return NF_ACCEPT;
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 3c96437b45a..9dd8cd4fb6e 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -760,7 +760,7 @@ static const u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG) + 1] =
760}; 760};
761 761
762/* Protect conntrack agaist broken packets. Code taken from ipt_unclean.c. */ 762/* Protect conntrack agaist broken packets. Code taken from ipt_unclean.c. */
763static int tcp_error(struct net *net, 763static int tcp_error(struct net *net, struct nf_conn *tmpl,
764 struct sk_buff *skb, 764 struct sk_buff *skb,
765 unsigned int dataoff, 765 unsigned int dataoff,
766 enum ip_conntrack_info *ctinfo, 766 enum ip_conntrack_info *ctinfo,
@@ -1045,7 +1045,7 @@ static int tcp_packet(struct nf_conn *ct,
1045 after SYN_RECV or a valid answer for a picked up 1045 after SYN_RECV or a valid answer for a picked up
1046 connection. */ 1046 connection. */
1047 set_bit(IPS_ASSURED_BIT, &ct->status); 1047 set_bit(IPS_ASSURED_BIT, &ct->status);
1048 nf_conntrack_event_cache(IPCT_STATUS, ct); 1048 nf_conntrack_event_cache(IPCT_ASSURED, ct);
1049 } 1049 }
1050 nf_ct_refresh_acct(ct, ctinfo, skb, timeout); 1050 nf_ct_refresh_acct(ct, ctinfo, skb, timeout);
1051 1051
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 5c5518bedb4..8289088b821 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -77,7 +77,7 @@ static int udp_packet(struct nf_conn *ct,
77 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout_stream); 77 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout_stream);
78 /* Also, more likely to be important, and not a probe */ 78 /* Also, more likely to be important, and not a probe */
79 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) 79 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
80 nf_conntrack_event_cache(IPCT_STATUS, ct); 80 nf_conntrack_event_cache(IPCT_ASSURED, ct);
81 } else 81 } else
82 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout); 82 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout);
83 83
@@ -91,8 +91,8 @@ static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb,
91 return true; 91 return true;
92} 92}
93 93
94static int udp_error(struct net *net, struct sk_buff *skb, unsigned int dataoff, 94static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
95 enum ip_conntrack_info *ctinfo, 95 unsigned int dataoff, enum ip_conntrack_info *ctinfo,
96 u_int8_t pf, 96 u_int8_t pf,
97 unsigned int hooknum) 97 unsigned int hooknum)
98{ 98{
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 458655bb210..263b5a72588 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -75,7 +75,7 @@ static int udplite_packet(struct nf_conn *ct,
75 nf_ct_udplite_timeout_stream); 75 nf_ct_udplite_timeout_stream);
76 /* Also, more likely to be important, and not a probe */ 76 /* Also, more likely to be important, and not a probe */
77 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) 77 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
78 nf_conntrack_event_cache(IPCT_STATUS, ct); 78 nf_conntrack_event_cache(IPCT_ASSURED, ct);
79 } else 79 } else
80 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udplite_timeout); 80 nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udplite_timeout);
81 81
@@ -89,7 +89,7 @@ static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb,
89 return true; 89 return true;
90} 90}
91 91
92static int udplite_error(struct net *net, 92static int udplite_error(struct net *net, struct nf_conn *tmpl,
93 struct sk_buff *skb, 93 struct sk_buff *skb,
94 unsigned int dataoff, 94 unsigned int dataoff,
95 enum ip_conntrack_info *ctinfo, 95 enum ip_conntrack_info *ctinfo,
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 023966b569b..8dd75d90efc 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -16,12 +16,14 @@
16#include <linux/inet.h> 16#include <linux/inet.h>
17#include <linux/in.h> 17#include <linux/in.h>
18#include <linux/udp.h> 18#include <linux/udp.h>
19#include <linux/tcp.h>
19#include <linux/netfilter.h> 20#include <linux/netfilter.h>
20 21
21#include <net/netfilter/nf_conntrack.h> 22#include <net/netfilter/nf_conntrack.h>
22#include <net/netfilter/nf_conntrack_core.h> 23#include <net/netfilter/nf_conntrack_core.h>
23#include <net/netfilter/nf_conntrack_expect.h> 24#include <net/netfilter/nf_conntrack_expect.h>
24#include <net/netfilter/nf_conntrack_helper.h> 25#include <net/netfilter/nf_conntrack_helper.h>
26#include <net/netfilter/nf_conntrack_zones.h>
25#include <linux/netfilter/nf_conntrack_sip.h> 27#include <linux/netfilter/nf_conntrack_sip.h>
26 28
27MODULE_LICENSE("GPL"); 29MODULE_LICENSE("GPL");
@@ -50,12 +52,16 @@ module_param(sip_direct_media, int, 0600);
50MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling " 52MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling "
51 "endpoints only (default 1)"); 53 "endpoints only (default 1)");
52 54
53unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, 55unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, unsigned int dataoff,
54 const char **dptr, 56 const char **dptr,
55 unsigned int *datalen) __read_mostly; 57 unsigned int *datalen) __read_mostly;
56EXPORT_SYMBOL_GPL(nf_nat_sip_hook); 58EXPORT_SYMBOL_GPL(nf_nat_sip_hook);
57 59
60void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, s16 off) __read_mostly;
61EXPORT_SYMBOL_GPL(nf_nat_sip_seq_adjust_hook);
62
58unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb, 63unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
64 unsigned int dataoff,
59 const char **dptr, 65 const char **dptr,
60 unsigned int *datalen, 66 unsigned int *datalen,
61 struct nf_conntrack_expect *exp, 67 struct nf_conntrack_expect *exp,
@@ -63,17 +69,17 @@ unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
63 unsigned int matchlen) __read_mostly; 69 unsigned int matchlen) __read_mostly;
64EXPORT_SYMBOL_GPL(nf_nat_sip_expect_hook); 70EXPORT_SYMBOL_GPL(nf_nat_sip_expect_hook);
65 71
66unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, 72unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int dataoff,
67 const char **dptr, 73 const char **dptr,
68 unsigned int dataoff,
69 unsigned int *datalen, 74 unsigned int *datalen,
75 unsigned int sdpoff,
70 enum sdp_header_types type, 76 enum sdp_header_types type,
71 enum sdp_header_types term, 77 enum sdp_header_types term,
72 const union nf_inet_addr *addr) 78 const union nf_inet_addr *addr)
73 __read_mostly; 79 __read_mostly;
74EXPORT_SYMBOL_GPL(nf_nat_sdp_addr_hook); 80EXPORT_SYMBOL_GPL(nf_nat_sdp_addr_hook);
75 81
76unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, 82unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int dataoff,
77 const char **dptr, 83 const char **dptr,
78 unsigned int *datalen, 84 unsigned int *datalen,
79 unsigned int matchoff, 85 unsigned int matchoff,
@@ -82,14 +88,15 @@ unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
82EXPORT_SYMBOL_GPL(nf_nat_sdp_port_hook); 88EXPORT_SYMBOL_GPL(nf_nat_sdp_port_hook);
83 89
84unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb, 90unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
85 const char **dptr,
86 unsigned int dataoff, 91 unsigned int dataoff,
92 const char **dptr,
87 unsigned int *datalen, 93 unsigned int *datalen,
94 unsigned int sdpoff,
88 const union nf_inet_addr *addr) 95 const union nf_inet_addr *addr)
89 __read_mostly; 96 __read_mostly;
90EXPORT_SYMBOL_GPL(nf_nat_sdp_session_hook); 97EXPORT_SYMBOL_GPL(nf_nat_sdp_session_hook);
91 98
92unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, 99unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, unsigned int dataoff,
93 const char **dptr, 100 const char **dptr,
94 unsigned int *datalen, 101 unsigned int *datalen,
95 struct nf_conntrack_expect *rtp_exp, 102 struct nf_conntrack_expect *rtp_exp,
@@ -236,12 +243,13 @@ int ct_sip_parse_request(const struct nf_conn *ct,
236 return 0; 243 return 0;
237 244
238 /* Find SIP URI */ 245 /* Find SIP URI */
239 limit -= strlen("sip:"); 246 for (; dptr < limit - strlen("sip:"); dptr++) {
240 for (; dptr < limit; dptr++) {
241 if (*dptr == '\r' || *dptr == '\n') 247 if (*dptr == '\r' || *dptr == '\n')
242 return -1; 248 return -1;
243 if (strnicmp(dptr, "sip:", strlen("sip:")) == 0) 249 if (strnicmp(dptr, "sip:", strlen("sip:")) == 0) {
250 dptr += strlen("sip:");
244 break; 251 break;
252 }
245 } 253 }
246 if (!skp_epaddr_len(ct, dptr, limit, &shift)) 254 if (!skp_epaddr_len(ct, dptr, limit, &shift))
247 return 0; 255 return 0;
@@ -284,7 +292,8 @@ static const struct sip_header ct_sip_hdrs[] = {
284 [SIP_HDR_FROM] = SIP_HDR("From", "f", "sip:", skp_epaddr_len), 292 [SIP_HDR_FROM] = SIP_HDR("From", "f", "sip:", skp_epaddr_len),
285 [SIP_HDR_TO] = SIP_HDR("To", "t", "sip:", skp_epaddr_len), 293 [SIP_HDR_TO] = SIP_HDR("To", "t", "sip:", skp_epaddr_len),
286 [SIP_HDR_CONTACT] = SIP_HDR("Contact", "m", "sip:", skp_epaddr_len), 294 [SIP_HDR_CONTACT] = SIP_HDR("Contact", "m", "sip:", skp_epaddr_len),
287 [SIP_HDR_VIA] = SIP_HDR("Via", "v", "UDP ", epaddr_len), 295 [SIP_HDR_VIA_UDP] = SIP_HDR("Via", "v", "UDP ", epaddr_len),
296 [SIP_HDR_VIA_TCP] = SIP_HDR("Via", "v", "TCP ", epaddr_len),
288 [SIP_HDR_EXPIRES] = SIP_HDR("Expires", NULL, NULL, digits_len), 297 [SIP_HDR_EXPIRES] = SIP_HDR("Expires", NULL, NULL, digits_len),
289 [SIP_HDR_CONTENT_LENGTH] = SIP_HDR("Content-Length", "l", NULL, digits_len), 298 [SIP_HDR_CONTENT_LENGTH] = SIP_HDR("Content-Length", "l", NULL, digits_len),
290}; 299};
@@ -516,6 +525,33 @@ int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
516} 525}
517EXPORT_SYMBOL_GPL(ct_sip_parse_header_uri); 526EXPORT_SYMBOL_GPL(ct_sip_parse_header_uri);
518 527
528static int ct_sip_parse_param(const struct nf_conn *ct, const char *dptr,
529 unsigned int dataoff, unsigned int datalen,
530 const char *name,
531 unsigned int *matchoff, unsigned int *matchlen)
532{
533 const char *limit = dptr + datalen;
534 const char *start;
535 const char *end;
536
537 limit = ct_sip_header_search(dptr + dataoff, limit, ",", strlen(","));
538 if (!limit)
539 limit = dptr + datalen;
540
541 start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name));
542 if (!start)
543 return 0;
544 start += strlen(name);
545
546 end = ct_sip_header_search(start, limit, ";", strlen(";"));
547 if (!end)
548 end = limit;
549
550 *matchoff = start - dptr;
551 *matchlen = end - start;
552 return 1;
553}
554
519/* Parse address from header parameter and return address, offset and length */ 555/* Parse address from header parameter and return address, offset and length */
520int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr, 556int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
521 unsigned int dataoff, unsigned int datalen, 557 unsigned int dataoff, unsigned int datalen,
@@ -574,6 +610,29 @@ int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
574} 610}
575EXPORT_SYMBOL_GPL(ct_sip_parse_numerical_param); 611EXPORT_SYMBOL_GPL(ct_sip_parse_numerical_param);
576 612
613static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr,
614 unsigned int dataoff, unsigned int datalen,
615 u8 *proto)
616{
617 unsigned int matchoff, matchlen;
618
619 if (ct_sip_parse_param(ct, dptr, dataoff, datalen, "transport=",
620 &matchoff, &matchlen)) {
621 if (!strnicmp(dptr + matchoff, "TCP", strlen("TCP")))
622 *proto = IPPROTO_TCP;
623 else if (!strnicmp(dptr + matchoff, "UDP", strlen("UDP")))
624 *proto = IPPROTO_UDP;
625 else
626 return 0;
627
628 if (*proto != nf_ct_protonum(ct))
629 return 0;
630 } else
631 *proto = nf_ct_protonum(ct);
632
633 return 1;
634}
635
577/* SDP header parsing: a SDP session description contains an ordered set of 636/* SDP header parsing: a SDP session description contains an ordered set of
578 * headers, starting with a section containing general session parameters, 637 * headers, starting with a section containing general session parameters,
579 * optionally followed by multiple media descriptions. 638 * optionally followed by multiple media descriptions.
@@ -682,7 +741,7 @@ static int ct_sip_parse_sdp_addr(const struct nf_conn *ct, const char *dptr,
682 741
683static int refresh_signalling_expectation(struct nf_conn *ct, 742static int refresh_signalling_expectation(struct nf_conn *ct,
684 union nf_inet_addr *addr, 743 union nf_inet_addr *addr,
685 __be16 port, 744 u8 proto, __be16 port,
686 unsigned int expires) 745 unsigned int expires)
687{ 746{
688 struct nf_conn_help *help = nfct_help(ct); 747 struct nf_conn_help *help = nfct_help(ct);
@@ -694,6 +753,7 @@ static int refresh_signalling_expectation(struct nf_conn *ct,
694 hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) { 753 hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
695 if (exp->class != SIP_EXPECT_SIGNALLING || 754 if (exp->class != SIP_EXPECT_SIGNALLING ||
696 !nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) || 755 !nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) ||
756 exp->tuple.dst.protonum != proto ||
697 exp->tuple.dst.u.udp.port != port) 757 exp->tuple.dst.u.udp.port != port)
698 continue; 758 continue;
699 if (!del_timer(&exp->timeout)) 759 if (!del_timer(&exp->timeout))
@@ -728,7 +788,7 @@ static void flush_expectations(struct nf_conn *ct, bool media)
728 spin_unlock_bh(&nf_conntrack_lock); 788 spin_unlock_bh(&nf_conntrack_lock);
729} 789}
730 790
731static int set_expected_rtp_rtcp(struct sk_buff *skb, 791static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff,
732 const char **dptr, unsigned int *datalen, 792 const char **dptr, unsigned int *datalen,
733 union nf_inet_addr *daddr, __be16 port, 793 union nf_inet_addr *daddr, __be16 port,
734 enum sip_expectation_classes class, 794 enum sip_expectation_classes class,
@@ -777,7 +837,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb,
777 837
778 rcu_read_lock(); 838 rcu_read_lock();
779 do { 839 do {
780 exp = __nf_ct_expect_find(net, &tuple); 840 exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple);
781 841
782 if (!exp || exp->master == ct || 842 if (!exp || exp->master == ct ||
783 nfct_help(exp->master)->helper != nfct_help(ct)->helper || 843 nfct_help(exp->master)->helper != nfct_help(ct)->helper ||
@@ -805,7 +865,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb,
805 if (direct_rtp) { 865 if (direct_rtp) {
806 nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook); 866 nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook);
807 if (nf_nat_sdp_port && 867 if (nf_nat_sdp_port &&
808 !nf_nat_sdp_port(skb, dptr, datalen, 868 !nf_nat_sdp_port(skb, dataoff, dptr, datalen,
809 mediaoff, medialen, ntohs(rtp_port))) 869 mediaoff, medialen, ntohs(rtp_port)))
810 goto err1; 870 goto err1;
811 } 871 }
@@ -827,7 +887,8 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb,
827 887
828 nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook); 888 nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook);
829 if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp) 889 if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp)
830 ret = nf_nat_sdp_media(skb, dptr, datalen, rtp_exp, rtcp_exp, 890 ret = nf_nat_sdp_media(skb, dataoff, dptr, datalen,
891 rtp_exp, rtcp_exp,
831 mediaoff, medialen, daddr); 892 mediaoff, medialen, daddr);
832 else { 893 else {
833 if (nf_ct_expect_related(rtp_exp) == 0) { 894 if (nf_ct_expect_related(rtp_exp) == 0) {
@@ -847,6 +908,7 @@ err1:
847static const struct sdp_media_type sdp_media_types[] = { 908static const struct sdp_media_type sdp_media_types[] = {
848 SDP_MEDIA_TYPE("audio ", SIP_EXPECT_AUDIO), 909 SDP_MEDIA_TYPE("audio ", SIP_EXPECT_AUDIO),
849 SDP_MEDIA_TYPE("video ", SIP_EXPECT_VIDEO), 910 SDP_MEDIA_TYPE("video ", SIP_EXPECT_VIDEO),
911 SDP_MEDIA_TYPE("image ", SIP_EXPECT_IMAGE),
850}; 912};
851 913
852static const struct sdp_media_type *sdp_media_type(const char *dptr, 914static const struct sdp_media_type *sdp_media_type(const char *dptr,
@@ -866,13 +928,12 @@ static const struct sdp_media_type *sdp_media_type(const char *dptr,
866 return NULL; 928 return NULL;
867} 929}
868 930
869static int process_sdp(struct sk_buff *skb, 931static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
870 const char **dptr, unsigned int *datalen, 932 const char **dptr, unsigned int *datalen,
871 unsigned int cseq) 933 unsigned int cseq)
872{ 934{
873 enum ip_conntrack_info ctinfo; 935 enum ip_conntrack_info ctinfo;
874 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 936 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
875 struct nf_conn_help *help = nfct_help(ct);
876 unsigned int matchoff, matchlen; 937 unsigned int matchoff, matchlen;
877 unsigned int mediaoff, medialen; 938 unsigned int mediaoff, medialen;
878 unsigned int sdpoff; 939 unsigned int sdpoff;
@@ -941,7 +1002,7 @@ static int process_sdp(struct sk_buff *skb,
941 else 1002 else
942 return NF_DROP; 1003 return NF_DROP;
943 1004
944 ret = set_expected_rtp_rtcp(skb, dptr, datalen, 1005 ret = set_expected_rtp_rtcp(skb, dataoff, dptr, datalen,
945 &rtp_addr, htons(port), t->class, 1006 &rtp_addr, htons(port), t->class,
946 mediaoff, medialen); 1007 mediaoff, medialen);
947 if (ret != NF_ACCEPT) 1008 if (ret != NF_ACCEPT)
@@ -949,8 +1010,9 @@ static int process_sdp(struct sk_buff *skb,
949 1010
950 /* Update media connection address if present */ 1011 /* Update media connection address if present */
951 if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) { 1012 if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) {
952 ret = nf_nat_sdp_addr(skb, dptr, mediaoff, datalen, 1013 ret = nf_nat_sdp_addr(skb, dataoff, dptr, datalen,
953 c_hdr, SDP_HDR_MEDIA, &rtp_addr); 1014 mediaoff, c_hdr, SDP_HDR_MEDIA,
1015 &rtp_addr);
954 if (ret != NF_ACCEPT) 1016 if (ret != NF_ACCEPT)
955 return ret; 1017 return ret;
956 } 1018 }
@@ -960,14 +1022,12 @@ static int process_sdp(struct sk_buff *skb,
960 /* Update session connection and owner addresses */ 1022 /* Update session connection and owner addresses */
961 nf_nat_sdp_session = rcu_dereference(nf_nat_sdp_session_hook); 1023 nf_nat_sdp_session = rcu_dereference(nf_nat_sdp_session_hook);
962 if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK) 1024 if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK)
963 ret = nf_nat_sdp_session(skb, dptr, sdpoff, datalen, &rtp_addr); 1025 ret = nf_nat_sdp_session(skb, dataoff, dptr, datalen, sdpoff,
964 1026 &rtp_addr);
965 if (ret == NF_ACCEPT && i > 0)
966 help->help.ct_sip_info.invite_cseq = cseq;
967 1027
968 return ret; 1028 return ret;
969} 1029}
970static int process_invite_response(struct sk_buff *skb, 1030static int process_invite_response(struct sk_buff *skb, unsigned int dataoff,
971 const char **dptr, unsigned int *datalen, 1031 const char **dptr, unsigned int *datalen,
972 unsigned int cseq, unsigned int code) 1032 unsigned int cseq, unsigned int code)
973{ 1033{
@@ -977,13 +1037,13 @@ static int process_invite_response(struct sk_buff *skb,
977 1037
978 if ((code >= 100 && code <= 199) || 1038 if ((code >= 100 && code <= 199) ||
979 (code >= 200 && code <= 299)) 1039 (code >= 200 && code <= 299))
980 return process_sdp(skb, dptr, datalen, cseq); 1040 return process_sdp(skb, dataoff, dptr, datalen, cseq);
981 else if (help->help.ct_sip_info.invite_cseq == cseq) 1041 else if (help->help.ct_sip_info.invite_cseq == cseq)
982 flush_expectations(ct, true); 1042 flush_expectations(ct, true);
983 return NF_ACCEPT; 1043 return NF_ACCEPT;
984} 1044}
985 1045
986static int process_update_response(struct sk_buff *skb, 1046static int process_update_response(struct sk_buff *skb, unsigned int dataoff,
987 const char **dptr, unsigned int *datalen, 1047 const char **dptr, unsigned int *datalen,
988 unsigned int cseq, unsigned int code) 1048 unsigned int cseq, unsigned int code)
989{ 1049{
@@ -993,13 +1053,13 @@ static int process_update_response(struct sk_buff *skb,
993 1053
994 if ((code >= 100 && code <= 199) || 1054 if ((code >= 100 && code <= 199) ||
995 (code >= 200 && code <= 299)) 1055 (code >= 200 && code <= 299))
996 return process_sdp(skb, dptr, datalen, cseq); 1056 return process_sdp(skb, dataoff, dptr, datalen, cseq);
997 else if (help->help.ct_sip_info.invite_cseq == cseq) 1057 else if (help->help.ct_sip_info.invite_cseq == cseq)
998 flush_expectations(ct, true); 1058 flush_expectations(ct, true);
999 return NF_ACCEPT; 1059 return NF_ACCEPT;
1000} 1060}
1001 1061
1002static int process_prack_response(struct sk_buff *skb, 1062static int process_prack_response(struct sk_buff *skb, unsigned int dataoff,
1003 const char **dptr, unsigned int *datalen, 1063 const char **dptr, unsigned int *datalen,
1004 unsigned int cseq, unsigned int code) 1064 unsigned int cseq, unsigned int code)
1005{ 1065{
@@ -1009,13 +1069,29 @@ static int process_prack_response(struct sk_buff *skb,
1009 1069
1010 if ((code >= 100 && code <= 199) || 1070 if ((code >= 100 && code <= 199) ||
1011 (code >= 200 && code <= 299)) 1071 (code >= 200 && code <= 299))
1012 return process_sdp(skb, dptr, datalen, cseq); 1072 return process_sdp(skb, dataoff, dptr, datalen, cseq);
1013 else if (help->help.ct_sip_info.invite_cseq == cseq) 1073 else if (help->help.ct_sip_info.invite_cseq == cseq)
1014 flush_expectations(ct, true); 1074 flush_expectations(ct, true);
1015 return NF_ACCEPT; 1075 return NF_ACCEPT;
1016} 1076}
1017 1077
1018static int process_bye_request(struct sk_buff *skb, 1078static int process_invite_request(struct sk_buff *skb, unsigned int dataoff,
1079 const char **dptr, unsigned int *datalen,
1080 unsigned int cseq)
1081{
1082 enum ip_conntrack_info ctinfo;
1083 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1084 struct nf_conn_help *help = nfct_help(ct);
1085 unsigned int ret;
1086
1087 flush_expectations(ct, true);
1088 ret = process_sdp(skb, dataoff, dptr, datalen, cseq);
1089 if (ret == NF_ACCEPT)
1090 help->help.ct_sip_info.invite_cseq = cseq;
1091 return ret;
1092}
1093
1094static int process_bye_request(struct sk_buff *skb, unsigned int dataoff,
1019 const char **dptr, unsigned int *datalen, 1095 const char **dptr, unsigned int *datalen,
1020 unsigned int cseq) 1096 unsigned int cseq)
1021{ 1097{
@@ -1030,7 +1106,7 @@ static int process_bye_request(struct sk_buff *skb,
1030 * signalling connections. The expectation is marked inactive and is activated 1106 * signalling connections. The expectation is marked inactive and is activated
1031 * when receiving a response indicating success from the registrar. 1107 * when receiving a response indicating success from the registrar.
1032 */ 1108 */
1033static int process_register_request(struct sk_buff *skb, 1109static int process_register_request(struct sk_buff *skb, unsigned int dataoff,
1034 const char **dptr, unsigned int *datalen, 1110 const char **dptr, unsigned int *datalen,
1035 unsigned int cseq) 1111 unsigned int cseq)
1036{ 1112{
@@ -1042,6 +1118,7 @@ static int process_register_request(struct sk_buff *skb,
1042 struct nf_conntrack_expect *exp; 1118 struct nf_conntrack_expect *exp;
1043 union nf_inet_addr *saddr, daddr; 1119 union nf_inet_addr *saddr, daddr;
1044 __be16 port; 1120 __be16 port;
1121 u8 proto;
1045 unsigned int expires = 0; 1122 unsigned int expires = 0;
1046 int ret; 1123 int ret;
1047 typeof(nf_nat_sip_expect_hook) nf_nat_sip_expect; 1124 typeof(nf_nat_sip_expect_hook) nf_nat_sip_expect;
@@ -1074,6 +1151,10 @@ static int process_register_request(struct sk_buff *skb,
1074 if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, &daddr)) 1151 if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, &daddr))
1075 return NF_ACCEPT; 1152 return NF_ACCEPT;
1076 1153
1154 if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen, *datalen,
1155 &proto) == 0)
1156 return NF_ACCEPT;
1157
1077 if (ct_sip_parse_numerical_param(ct, *dptr, 1158 if (ct_sip_parse_numerical_param(ct, *dptr,
1078 matchoff + matchlen, *datalen, 1159 matchoff + matchlen, *datalen,
1079 "expires=", NULL, NULL, &expires) < 0) 1160 "expires=", NULL, NULL, &expires) < 0)
@@ -1093,14 +1174,14 @@ static int process_register_request(struct sk_buff *skb,
1093 saddr = &ct->tuplehash[!dir].tuple.src.u3; 1174 saddr = &ct->tuplehash[!dir].tuple.src.u3;
1094 1175
1095 nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, nf_ct_l3num(ct), 1176 nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, nf_ct_l3num(ct),
1096 saddr, &daddr, IPPROTO_UDP, NULL, &port); 1177 saddr, &daddr, proto, NULL, &port);
1097 exp->timeout.expires = sip_timeout * HZ; 1178 exp->timeout.expires = sip_timeout * HZ;
1098 exp->helper = nfct_help(ct)->helper; 1179 exp->helper = nfct_help(ct)->helper;
1099 exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE; 1180 exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE;
1100 1181
1101 nf_nat_sip_expect = rcu_dereference(nf_nat_sip_expect_hook); 1182 nf_nat_sip_expect = rcu_dereference(nf_nat_sip_expect_hook);
1102 if (nf_nat_sip_expect && ct->status & IPS_NAT_MASK) 1183 if (nf_nat_sip_expect && ct->status & IPS_NAT_MASK)
1103 ret = nf_nat_sip_expect(skb, dptr, datalen, exp, 1184 ret = nf_nat_sip_expect(skb, dataoff, dptr, datalen, exp,
1104 matchoff, matchlen); 1185 matchoff, matchlen);
1105 else { 1186 else {
1106 if (nf_ct_expect_related(exp) != 0) 1187 if (nf_ct_expect_related(exp) != 0)
@@ -1116,7 +1197,7 @@ store_cseq:
1116 return ret; 1197 return ret;
1117} 1198}
1118 1199
1119static int process_register_response(struct sk_buff *skb, 1200static int process_register_response(struct sk_buff *skb, unsigned int dataoff,
1120 const char **dptr, unsigned int *datalen, 1201 const char **dptr, unsigned int *datalen,
1121 unsigned int cseq, unsigned int code) 1202 unsigned int cseq, unsigned int code)
1122{ 1203{
@@ -1126,7 +1207,8 @@ static int process_register_response(struct sk_buff *skb,
1126 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); 1207 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
1127 union nf_inet_addr addr; 1208 union nf_inet_addr addr;
1128 __be16 port; 1209 __be16 port;
1129 unsigned int matchoff, matchlen, dataoff = 0; 1210 u8 proto;
1211 unsigned int matchoff, matchlen, coff = 0;
1130 unsigned int expires = 0; 1212 unsigned int expires = 0;
1131 int in_contact = 0, ret; 1213 int in_contact = 0, ret;
1132 1214
@@ -1153,7 +1235,7 @@ static int process_register_response(struct sk_buff *skb,
1153 while (1) { 1235 while (1) {
1154 unsigned int c_expires = expires; 1236 unsigned int c_expires = expires;
1155 1237
1156 ret = ct_sip_parse_header_uri(ct, *dptr, &dataoff, *datalen, 1238 ret = ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen,
1157 SIP_HDR_CONTACT, &in_contact, 1239 SIP_HDR_CONTACT, &in_contact,
1158 &matchoff, &matchlen, 1240 &matchoff, &matchlen,
1159 &addr, &port); 1241 &addr, &port);
@@ -1166,6 +1248,10 @@ static int process_register_response(struct sk_buff *skb,
1166 if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, &addr)) 1248 if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, &addr))
1167 continue; 1249 continue;
1168 1250
1251 if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen,
1252 *datalen, &proto) == 0)
1253 continue;
1254
1169 ret = ct_sip_parse_numerical_param(ct, *dptr, 1255 ret = ct_sip_parse_numerical_param(ct, *dptr,
1170 matchoff + matchlen, 1256 matchoff + matchlen,
1171 *datalen, "expires=", 1257 *datalen, "expires=",
@@ -1174,7 +1260,8 @@ static int process_register_response(struct sk_buff *skb,
1174 return NF_DROP; 1260 return NF_DROP;
1175 if (c_expires == 0) 1261 if (c_expires == 0)
1176 break; 1262 break;
1177 if (refresh_signalling_expectation(ct, &addr, port, c_expires)) 1263 if (refresh_signalling_expectation(ct, &addr, proto, port,
1264 c_expires))
1178 return NF_ACCEPT; 1265 return NF_ACCEPT;
1179 } 1266 }
1180 1267
@@ -1184,7 +1271,7 @@ flush:
1184} 1271}
1185 1272
1186static const struct sip_handler sip_handlers[] = { 1273static const struct sip_handler sip_handlers[] = {
1187 SIP_HANDLER("INVITE", process_sdp, process_invite_response), 1274 SIP_HANDLER("INVITE", process_invite_request, process_invite_response),
1188 SIP_HANDLER("UPDATE", process_sdp, process_update_response), 1275 SIP_HANDLER("UPDATE", process_sdp, process_update_response),
1189 SIP_HANDLER("ACK", process_sdp, NULL), 1276 SIP_HANDLER("ACK", process_sdp, NULL),
1190 SIP_HANDLER("PRACK", process_sdp, process_prack_response), 1277 SIP_HANDLER("PRACK", process_sdp, process_prack_response),
@@ -1192,13 +1279,13 @@ static const struct sip_handler sip_handlers[] = {
1192 SIP_HANDLER("REGISTER", process_register_request, process_register_response), 1279 SIP_HANDLER("REGISTER", process_register_request, process_register_response),
1193}; 1280};
1194 1281
1195static int process_sip_response(struct sk_buff *skb, 1282static int process_sip_response(struct sk_buff *skb, unsigned int dataoff,
1196 const char **dptr, unsigned int *datalen) 1283 const char **dptr, unsigned int *datalen)
1197{ 1284{
1198 enum ip_conntrack_info ctinfo; 1285 enum ip_conntrack_info ctinfo;
1199 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 1286 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1200 unsigned int matchoff, matchlen; 1287 unsigned int matchoff, matchlen, matchend;
1201 unsigned int code, cseq, dataoff, i; 1288 unsigned int code, cseq, i;
1202 1289
1203 if (*datalen < strlen("SIP/2.0 200")) 1290 if (*datalen < strlen("SIP/2.0 200"))
1204 return NF_ACCEPT; 1291 return NF_ACCEPT;
@@ -1212,7 +1299,7 @@ static int process_sip_response(struct sk_buff *skb,
1212 cseq = simple_strtoul(*dptr + matchoff, NULL, 10); 1299 cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
1213 if (!cseq) 1300 if (!cseq)
1214 return NF_DROP; 1301 return NF_DROP;
1215 dataoff = matchoff + matchlen + 1; 1302 matchend = matchoff + matchlen + 1;
1216 1303
1217 for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) { 1304 for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
1218 const struct sip_handler *handler; 1305 const struct sip_handler *handler;
@@ -1220,15 +1307,16 @@ static int process_sip_response(struct sk_buff *skb,
1220 handler = &sip_handlers[i]; 1307 handler = &sip_handlers[i];
1221 if (handler->response == NULL) 1308 if (handler->response == NULL)
1222 continue; 1309 continue;
1223 if (*datalen < dataoff + handler->len || 1310 if (*datalen < matchend + handler->len ||
1224 strnicmp(*dptr + dataoff, handler->method, handler->len)) 1311 strnicmp(*dptr + matchend, handler->method, handler->len))
1225 continue; 1312 continue;
1226 return handler->response(skb, dptr, datalen, cseq, code); 1313 return handler->response(skb, dataoff, dptr, datalen,
1314 cseq, code);
1227 } 1315 }
1228 return NF_ACCEPT; 1316 return NF_ACCEPT;
1229} 1317}
1230 1318
1231static int process_sip_request(struct sk_buff *skb, 1319static int process_sip_request(struct sk_buff *skb, unsigned int dataoff,
1232 const char **dptr, unsigned int *datalen) 1320 const char **dptr, unsigned int *datalen)
1233{ 1321{
1234 enum ip_conntrack_info ctinfo; 1322 enum ip_conntrack_info ctinfo;
@@ -1253,69 +1341,157 @@ static int process_sip_request(struct sk_buff *skb,
1253 if (!cseq) 1341 if (!cseq)
1254 return NF_DROP; 1342 return NF_DROP;
1255 1343
1256 return handler->request(skb, dptr, datalen, cseq); 1344 return handler->request(skb, dataoff, dptr, datalen, cseq);
1257 } 1345 }
1258 return NF_ACCEPT; 1346 return NF_ACCEPT;
1259} 1347}
1260 1348
1261static int sip_help(struct sk_buff *skb, 1349static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct,
1262 unsigned int protoff, 1350 unsigned int dataoff, const char **dptr,
1263 struct nf_conn *ct, 1351 unsigned int *datalen)
1264 enum ip_conntrack_info ctinfo) 1352{
1353 typeof(nf_nat_sip_hook) nf_nat_sip;
1354 int ret;
1355
1356 if (strnicmp(*dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0)
1357 ret = process_sip_request(skb, dataoff, dptr, datalen);
1358 else
1359 ret = process_sip_response(skb, dataoff, dptr, datalen);
1360
1361 if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
1362 nf_nat_sip = rcu_dereference(nf_nat_sip_hook);
1363 if (nf_nat_sip && !nf_nat_sip(skb, dataoff, dptr, datalen))
1364 ret = NF_DROP;
1365 }
1366
1367 return ret;
1368}
1369
1370static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
1371 struct nf_conn *ct, enum ip_conntrack_info ctinfo)
1265{ 1372{
1373 struct tcphdr *th, _tcph;
1266 unsigned int dataoff, datalen; 1374 unsigned int dataoff, datalen;
1267 const char *dptr; 1375 unsigned int matchoff, matchlen, clen;
1376 unsigned int msglen, origlen;
1377 const char *dptr, *end;
1378 s16 diff, tdiff = 0;
1268 int ret; 1379 int ret;
1269 typeof(nf_nat_sip_hook) nf_nat_sip; 1380 typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
1381
1382 if (ctinfo != IP_CT_ESTABLISHED &&
1383 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
1384 return NF_ACCEPT;
1270 1385
1271 /* No Data ? */ 1386 /* No Data ? */
1272 dataoff = protoff + sizeof(struct udphdr); 1387 th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
1388 if (th == NULL)
1389 return NF_ACCEPT;
1390 dataoff = protoff + th->doff * 4;
1273 if (dataoff >= skb->len) 1391 if (dataoff >= skb->len)
1274 return NF_ACCEPT; 1392 return NF_ACCEPT;
1275 1393
1276 nf_ct_refresh(ct, skb, sip_timeout * HZ); 1394 nf_ct_refresh(ct, skb, sip_timeout * HZ);
1277 1395
1278 if (!skb_is_nonlinear(skb)) 1396 if (skb_is_nonlinear(skb)) {
1279 dptr = skb->data + dataoff;
1280 else {
1281 pr_debug("Copy of skbuff not supported yet.\n"); 1397 pr_debug("Copy of skbuff not supported yet.\n");
1282 return NF_ACCEPT; 1398 return NF_ACCEPT;
1283 } 1399 }
1284 1400
1401 dptr = skb->data + dataoff;
1285 datalen = skb->len - dataoff; 1402 datalen = skb->len - dataoff;
1286 if (datalen < strlen("SIP/2.0 200")) 1403 if (datalen < strlen("SIP/2.0 200"))
1287 return NF_ACCEPT; 1404 return NF_ACCEPT;
1288 1405
1289 if (strnicmp(dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0) 1406 while (1) {
1290 ret = process_sip_request(skb, &dptr, &datalen); 1407 if (ct_sip_get_header(ct, dptr, 0, datalen,
1291 else 1408 SIP_HDR_CONTENT_LENGTH,
1292 ret = process_sip_response(skb, &dptr, &datalen); 1409 &matchoff, &matchlen) <= 0)
1410 break;
1411
1412 clen = simple_strtoul(dptr + matchoff, (char **)&end, 10);
1413 if (dptr + matchoff == end)
1414 break;
1415
1416 if (end + strlen("\r\n\r\n") > dptr + datalen)
1417 break;
1418 if (end[0] != '\r' || end[1] != '\n' ||
1419 end[2] != '\r' || end[3] != '\n')
1420 break;
1421 end += strlen("\r\n\r\n") + clen;
1422
1423 msglen = origlen = end - dptr;
1424
1425 ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen);
1426 if (ret != NF_ACCEPT)
1427 break;
1428 diff = msglen - origlen;
1429 tdiff += diff;
1430
1431 dataoff += msglen;
1432 dptr += msglen;
1433 datalen = datalen + diff - msglen;
1434 }
1293 1435
1294 if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) { 1436 if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
1295 nf_nat_sip = rcu_dereference(nf_nat_sip_hook); 1437 nf_nat_sip_seq_adjust = rcu_dereference(nf_nat_sip_seq_adjust_hook);
1296 if (nf_nat_sip && !nf_nat_sip(skb, &dptr, &datalen)) 1438 if (nf_nat_sip_seq_adjust)
1297 ret = NF_DROP; 1439 nf_nat_sip_seq_adjust(skb, tdiff);
1298 } 1440 }
1299 1441
1300 return ret; 1442 return ret;
1301} 1443}
1302 1444
1303static struct nf_conntrack_helper sip[MAX_PORTS][2] __read_mostly; 1445static int sip_help_udp(struct sk_buff *skb, unsigned int protoff,
1304static char sip_names[MAX_PORTS][2][sizeof("sip-65535")] __read_mostly; 1446 struct nf_conn *ct, enum ip_conntrack_info ctinfo)
1447{
1448 unsigned int dataoff, datalen;
1449 const char *dptr;
1450
1451 /* No Data ? */
1452 dataoff = protoff + sizeof(struct udphdr);
1453 if (dataoff >= skb->len)
1454 return NF_ACCEPT;
1455
1456 nf_ct_refresh(ct, skb, sip_timeout * HZ);
1457
1458 if (skb_is_nonlinear(skb)) {
1459 pr_debug("Copy of skbuff not supported yet.\n");
1460 return NF_ACCEPT;
1461 }
1462
1463 dptr = skb->data + dataoff;
1464 datalen = skb->len - dataoff;
1465 if (datalen < strlen("SIP/2.0 200"))
1466 return NF_ACCEPT;
1467
1468 return process_sip_msg(skb, ct, dataoff, &dptr, &datalen);
1469}
1470
1471static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly;
1472static char sip_names[MAX_PORTS][4][sizeof("sip-65535")] __read_mostly;
1305 1473
1306static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = { 1474static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = {
1307 [SIP_EXPECT_SIGNALLING] = { 1475 [SIP_EXPECT_SIGNALLING] = {
1476 .name = "signalling",
1308 .max_expected = 1, 1477 .max_expected = 1,
1309 .timeout = 3 * 60, 1478 .timeout = 3 * 60,
1310 }, 1479 },
1311 [SIP_EXPECT_AUDIO] = { 1480 [SIP_EXPECT_AUDIO] = {
1481 .name = "audio",
1312 .max_expected = 2 * IP_CT_DIR_MAX, 1482 .max_expected = 2 * IP_CT_DIR_MAX,
1313 .timeout = 3 * 60, 1483 .timeout = 3 * 60,
1314 }, 1484 },
1315 [SIP_EXPECT_VIDEO] = { 1485 [SIP_EXPECT_VIDEO] = {
1486 .name = "video",
1316 .max_expected = 2 * IP_CT_DIR_MAX, 1487 .max_expected = 2 * IP_CT_DIR_MAX,
1317 .timeout = 3 * 60, 1488 .timeout = 3 * 60,
1318 }, 1489 },
1490 [SIP_EXPECT_IMAGE] = {
1491 .name = "image",
1492 .max_expected = IP_CT_DIR_MAX,
1493 .timeout = 3 * 60,
1494 },
1319}; 1495};
1320 1496
1321static void nf_conntrack_sip_fini(void) 1497static void nf_conntrack_sip_fini(void)
@@ -1323,7 +1499,7 @@ static void nf_conntrack_sip_fini(void)
1323 int i, j; 1499 int i, j;
1324 1500
1325 for (i = 0; i < ports_c; i++) { 1501 for (i = 0; i < ports_c; i++) {
1326 for (j = 0; j < 2; j++) { 1502 for (j = 0; j < ARRAY_SIZE(sip[i]); j++) {
1327 if (sip[i][j].me == NULL) 1503 if (sip[i][j].me == NULL)
1328 continue; 1504 continue;
1329 nf_conntrack_helper_unregister(&sip[i][j]); 1505 nf_conntrack_helper_unregister(&sip[i][j]);
@@ -1343,14 +1519,24 @@ static int __init nf_conntrack_sip_init(void)
1343 memset(&sip[i], 0, sizeof(sip[i])); 1519 memset(&sip[i], 0, sizeof(sip[i]));
1344 1520
1345 sip[i][0].tuple.src.l3num = AF_INET; 1521 sip[i][0].tuple.src.l3num = AF_INET;
1346 sip[i][1].tuple.src.l3num = AF_INET6; 1522 sip[i][0].tuple.dst.protonum = IPPROTO_UDP;
1347 for (j = 0; j < 2; j++) { 1523 sip[i][0].help = sip_help_udp;
1348 sip[i][j].tuple.dst.protonum = IPPROTO_UDP; 1524 sip[i][1].tuple.src.l3num = AF_INET;
1525 sip[i][1].tuple.dst.protonum = IPPROTO_TCP;
1526 sip[i][1].help = sip_help_tcp;
1527
1528 sip[i][2].tuple.src.l3num = AF_INET6;
1529 sip[i][2].tuple.dst.protonum = IPPROTO_UDP;
1530 sip[i][2].help = sip_help_udp;
1531 sip[i][3].tuple.src.l3num = AF_INET6;
1532 sip[i][3].tuple.dst.protonum = IPPROTO_TCP;
1533 sip[i][3].help = sip_help_tcp;
1534
1535 for (j = 0; j < ARRAY_SIZE(sip[i]); j++) {
1349 sip[i][j].tuple.src.u.udp.port = htons(ports[i]); 1536 sip[i][j].tuple.src.u.udp.port = htons(ports[i]);
1350 sip[i][j].expect_policy = sip_exp_policy; 1537 sip[i][j].expect_policy = sip_exp_policy;
1351 sip[i][j].expect_class_max = SIP_EXPECT_MAX; 1538 sip[i][j].expect_class_max = SIP_EXPECT_MAX;
1352 sip[i][j].me = THIS_MODULE; 1539 sip[i][j].me = THIS_MODULE;
1353 sip[i][j].help = sip_help;
1354 1540
1355 tmpname = &sip_names[i][j][0]; 1541 tmpname = &sip_names[i][j][0];
1356 if (ports[i] == SIP_PORT) 1542 if (ports[i] == SIP_PORT)
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index e310f1561bb..24a42efe62e 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -26,6 +26,7 @@
26#include <net/netfilter/nf_conntrack_expect.h> 26#include <net/netfilter/nf_conntrack_expect.h>
27#include <net/netfilter/nf_conntrack_helper.h> 27#include <net/netfilter/nf_conntrack_helper.h>
28#include <net/netfilter/nf_conntrack_acct.h> 28#include <net/netfilter/nf_conntrack_acct.h>
29#include <net/netfilter/nf_conntrack_zones.h>
29 30
30MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
31 32
@@ -171,6 +172,11 @@ static int ct_seq_show(struct seq_file *s, void *v)
171 goto release; 172 goto release;
172#endif 173#endif
173 174
175#ifdef CONFIG_NF_CONNTRACK_ZONES
176 if (seq_printf(s, "zone=%u ", nf_ct_zone(ct)))
177 goto release;
178#endif
179
174 if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use))) 180 if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
175 goto release; 181 goto release;
176 182
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 3a6fd77f776..ba095fd014e 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -265,7 +265,6 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
265 local_bh_disable(); 265 local_bh_disable();
266 entry->okfn(skb); 266 entry->okfn(skb);
267 local_bh_enable(); 267 local_bh_enable();
268 case NF_STOLEN:
269 break; 268 break;
270 case NF_QUEUE: 269 case NF_QUEUE:
271 if (!__nf_queue(skb, elem, entry->pf, entry->hook, 270 if (!__nf_queue(skb, elem, entry->pf, entry->hook,
@@ -273,6 +272,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
273 verdict >> NF_VERDICT_BITS)) 272 verdict >> NF_VERDICT_BITS))
274 goto next_hook; 273 goto next_hook;
275 break; 274 break;
275 case NF_STOLEN:
276 default: 276 default:
277 kfree_skb(skb); 277 kfree_skb(skb);
278 } 278 }
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index eedc0c1ac7a..8eb0cc23ada 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -40,7 +40,6 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
40 40
41static char __initdata nfversion[] = "0.30"; 41static char __initdata nfversion[] = "0.30";
42 42
43static struct sock *nfnl = NULL;
44static const struct nfnetlink_subsystem *subsys_table[NFNL_SUBSYS_COUNT]; 43static const struct nfnetlink_subsystem *subsys_table[NFNL_SUBSYS_COUNT];
45static DEFINE_MUTEX(nfnl_mutex); 44static DEFINE_MUTEX(nfnl_mutex);
46 45
@@ -101,34 +100,35 @@ nfnetlink_find_client(u_int16_t type, const struct nfnetlink_subsystem *ss)
101 return &ss->cb[cb_id]; 100 return &ss->cb[cb_id];
102} 101}
103 102
104int nfnetlink_has_listeners(unsigned int group) 103int nfnetlink_has_listeners(struct net *net, unsigned int group)
105{ 104{
106 return netlink_has_listeners(nfnl, group); 105 return netlink_has_listeners(net->nfnl, group);
107} 106}
108EXPORT_SYMBOL_GPL(nfnetlink_has_listeners); 107EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);
109 108
110int nfnetlink_send(struct sk_buff *skb, u32 pid, 109int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid,
111 unsigned group, int echo, gfp_t flags) 110 unsigned group, int echo, gfp_t flags)
112{ 111{
113 return nlmsg_notify(nfnl, skb, pid, group, echo, flags); 112 return nlmsg_notify(net->nfnl, skb, pid, group, echo, flags);
114} 113}
115EXPORT_SYMBOL_GPL(nfnetlink_send); 114EXPORT_SYMBOL_GPL(nfnetlink_send);
116 115
117void nfnetlink_set_err(u32 pid, u32 group, int error) 116void nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error)
118{ 117{
119 netlink_set_err(nfnl, pid, group, error); 118 netlink_set_err(net->nfnl, pid, group, error);
120} 119}
121EXPORT_SYMBOL_GPL(nfnetlink_set_err); 120EXPORT_SYMBOL_GPL(nfnetlink_set_err);
122 121
123int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags) 122int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags)
124{ 123{
125 return netlink_unicast(nfnl, skb, pid, flags); 124 return netlink_unicast(net->nfnl, skb, pid, flags);
126} 125}
127EXPORT_SYMBOL_GPL(nfnetlink_unicast); 126EXPORT_SYMBOL_GPL(nfnetlink_unicast);
128 127
129/* Process one complete nfnetlink message. */ 128/* Process one complete nfnetlink message. */
130static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 129static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
131{ 130{
131 struct net *net = sock_net(skb->sk);
132 const struct nfnl_callback *nc; 132 const struct nfnl_callback *nc;
133 const struct nfnetlink_subsystem *ss; 133 const struct nfnetlink_subsystem *ss;
134 int type, err; 134 int type, err;
@@ -170,7 +170,7 @@ replay:
170 if (err < 0) 170 if (err < 0)
171 return err; 171 return err;
172 172
173 err = nc->call(nfnl, skb, nlh, (const struct nlattr **)cda); 173 err = nc->call(net->nfnl, skb, nlh, (const struct nlattr **)cda);
174 if (err == -EAGAIN) 174 if (err == -EAGAIN)
175 goto replay; 175 goto replay;
176 return err; 176 return err;
@@ -184,26 +184,45 @@ static void nfnetlink_rcv(struct sk_buff *skb)
184 nfnl_unlock(); 184 nfnl_unlock();
185} 185}
186 186
187static void __exit nfnetlink_exit(void) 187static int __net_init nfnetlink_net_init(struct net *net)
188{ 188{
189 printk("Removing netfilter NETLINK layer.\n"); 189 struct sock *nfnl;
190 netlink_kernel_release(nfnl); 190
191 return; 191 nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, NFNLGRP_MAX,
192 nfnetlink_rcv, NULL, THIS_MODULE);
193 if (!nfnl)
194 return -ENOMEM;
195 net->nfnl_stash = nfnl;
196 rcu_assign_pointer(net->nfnl, nfnl);
197 return 0;
192} 198}
193 199
194static int __init nfnetlink_init(void) 200static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
195{ 201{
196 printk("Netfilter messages via NETLINK v%s.\n", nfversion); 202 struct net *net;
197 203
198 nfnl = netlink_kernel_create(&init_net, NETLINK_NETFILTER, NFNLGRP_MAX, 204 list_for_each_entry(net, net_exit_list, exit_list)
199 nfnetlink_rcv, NULL, THIS_MODULE); 205 rcu_assign_pointer(net->nfnl, NULL);
200 if (!nfnl) { 206 synchronize_net();
201 printk(KERN_ERR "cannot initialize nfnetlink!\n"); 207 list_for_each_entry(net, net_exit_list, exit_list)
202 return -ENOMEM; 208 netlink_kernel_release(net->nfnl_stash);
203 } 209}
204 210
205 return 0; 211static struct pernet_operations nfnetlink_net_ops = {
212 .init = nfnetlink_net_init,
213 .exit_batch = nfnetlink_net_exit_batch,
214};
215
216static int __init nfnetlink_init(void)
217{
218 printk("Netfilter messages via NETLINK v%s.\n", nfversion);
219 return register_pernet_subsys(&nfnetlink_net_ops);
206} 220}
207 221
222static void __exit nfnetlink_exit(void)
223{
224 printk("Removing netfilter NETLINK layer.\n");
225 unregister_pernet_subsys(&nfnetlink_net_ops);
226}
208module_init(nfnetlink_init); 227module_init(nfnetlink_init);
209module_exit(nfnetlink_exit); 228module_exit(nfnetlink_exit);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 9de0470d557..285e9029a9f 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -323,7 +323,8 @@ __nfulnl_send(struct nfulnl_instance *inst)
323 NLMSG_DONE, 323 NLMSG_DONE,
324 sizeof(struct nfgenmsg)); 324 sizeof(struct nfgenmsg));
325 325
326 status = nfnetlink_unicast(inst->skb, inst->peer_pid, MSG_DONTWAIT); 326 status = nfnetlink_unicast(inst->skb, &init_net, inst->peer_pid,
327 MSG_DONTWAIT);
327 328
328 inst->qlen = 0; 329 inst->qlen = 0;
329 inst->skb = NULL; 330 inst->skb = NULL;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 7e3fa410641..7ba4abc405c 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -112,7 +112,6 @@ instance_create(u_int16_t queue_num, int pid)
112 inst->copy_mode = NFQNL_COPY_NONE; 112 inst->copy_mode = NFQNL_COPY_NONE;
113 spin_lock_init(&inst->lock); 113 spin_lock_init(&inst->lock);
114 INIT_LIST_HEAD(&inst->queue_list); 114 INIT_LIST_HEAD(&inst->queue_list);
115 INIT_RCU_HEAD(&inst->rcu);
116 115
117 if (!try_module_get(THIS_MODULE)) { 116 if (!try_module_get(THIS_MODULE)) {
118 err = -EAGAIN; 117 err = -EAGAIN;
@@ -414,13 +413,13 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
414 queue->queue_dropped++; 413 queue->queue_dropped++;
415 if (net_ratelimit()) 414 if (net_ratelimit())
416 printk(KERN_WARNING "nf_queue: full at %d entries, " 415 printk(KERN_WARNING "nf_queue: full at %d entries, "
417 "dropping packets(s). Dropped: %d\n", 416 "dropping packets(s).\n",
418 queue->queue_total, queue->queue_dropped); 417 queue->queue_total);
419 goto err_out_free_nskb; 418 goto err_out_free_nskb;
420 } 419 }
421 420
422 /* nfnetlink_unicast will either free the nskb or add it to a socket */ 421 /* nfnetlink_unicast will either free the nskb or add it to a socket */
423 err = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT); 422 err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT);
424 if (err < 0) { 423 if (err < 0) {
425 queue->queue_user_dropped++; 424 queue->queue_user_dropped++;
426 goto err_out_unlock; 425 goto err_out_unlock;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index f01955cce31..0a12cedfe9e 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -26,7 +26,9 @@
26 26
27#include <linux/netfilter/x_tables.h> 27#include <linux/netfilter/x_tables.h>
28#include <linux/netfilter_arp.h> 28#include <linux/netfilter_arp.h>
29 29#include <linux/netfilter_ipv4/ip_tables.h>
30#include <linux/netfilter_ipv6/ip6_tables.h>
31#include <linux/netfilter_arp/arp_tables.h>
30 32
31MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
32MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); 34MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
@@ -37,7 +39,7 @@ MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
37struct compat_delta { 39struct compat_delta {
38 struct compat_delta *next; 40 struct compat_delta *next;
39 unsigned int offset; 41 unsigned int offset;
40 short delta; 42 int delta;
41}; 43};
42 44
43struct xt_af { 45struct xt_af {
@@ -364,8 +366,10 @@ int xt_check_match(struct xt_mtchk_param *par,
364 * ebt_among is exempt from centralized matchsize checking 366 * ebt_among is exempt from centralized matchsize checking
365 * because it uses a dynamic-size data set. 367 * because it uses a dynamic-size data set.
366 */ 368 */
367 pr_err("%s_tables: %s match: invalid size %Zu != %u\n", 369 pr_err("%s_tables: %s.%u match: invalid size "
370 "%u (kernel) != (user) %u\n",
368 xt_prefix[par->family], par->match->name, 371 xt_prefix[par->family], par->match->name,
372 par->match->revision,
369 XT_ALIGN(par->match->matchsize), size); 373 XT_ALIGN(par->match->matchsize), size);
370 return -EINVAL; 374 return -EINVAL;
371 } 375 }
@@ -435,10 +439,10 @@ void xt_compat_flush_offsets(u_int8_t af)
435} 439}
436EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); 440EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
437 441
438short xt_compat_calc_jump(u_int8_t af, unsigned int offset) 442int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
439{ 443{
440 struct compat_delta *tmp; 444 struct compat_delta *tmp;
441 short delta; 445 int delta;
442 446
443 for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next) 447 for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next)
444 if (tmp->offset < offset) 448 if (tmp->offset < offset)
@@ -481,8 +485,8 @@ int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
481} 485}
482EXPORT_SYMBOL_GPL(xt_compat_match_from_user); 486EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
483 487
484int xt_compat_match_to_user(struct xt_entry_match *m, void __user **dstptr, 488int xt_compat_match_to_user(const struct xt_entry_match *m,
485 unsigned int *size) 489 void __user **dstptr, unsigned int *size)
486{ 490{
487 const struct xt_match *match = m->u.kernel.match; 491 const struct xt_match *match = m->u.kernel.match;
488 struct compat_xt_entry_match __user *cm = *dstptr; 492 struct compat_xt_entry_match __user *cm = *dstptr;
@@ -514,8 +518,10 @@ int xt_check_target(struct xt_tgchk_param *par,
514 unsigned int size, u_int8_t proto, bool inv_proto) 518 unsigned int size, u_int8_t proto, bool inv_proto)
515{ 519{
516 if (XT_ALIGN(par->target->targetsize) != size) { 520 if (XT_ALIGN(par->target->targetsize) != size) {
517 pr_err("%s_tables: %s target: invalid size %Zu != %u\n", 521 pr_err("%s_tables: %s.%u target: invalid size "
522 "%u (kernel) != (user) %u\n",
518 xt_prefix[par->family], par->target->name, 523 xt_prefix[par->family], par->target->name,
524 par->target->revision,
519 XT_ALIGN(par->target->targetsize), size); 525 XT_ALIGN(par->target->targetsize), size);
520 return -EINVAL; 526 return -EINVAL;
521 } 527 }
@@ -582,8 +588,8 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
582} 588}
583EXPORT_SYMBOL_GPL(xt_compat_target_from_user); 589EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
584 590
585int xt_compat_target_to_user(struct xt_entry_target *t, void __user **dstptr, 591int xt_compat_target_to_user(const struct xt_entry_target *t,
586 unsigned int *size) 592 void __user **dstptr, unsigned int *size)
587{ 593{
588 const struct xt_target *target = t->u.kernel.target; 594 const struct xt_target *target = t->u.kernel.target;
589 struct compat_xt_entry_target __user *ct = *dstptr; 595 struct compat_xt_entry_target __user *ct = *dstptr;
@@ -1091,6 +1097,60 @@ static const struct file_operations xt_target_ops = {
1091 1097
1092#endif /* CONFIG_PROC_FS */ 1098#endif /* CONFIG_PROC_FS */
1093 1099
1100/**
1101 * xt_hook_link - set up hooks for a new table
1102 * @table: table with metadata needed to set up hooks
1103 * @fn: Hook function
1104 *
1105 * This function will take care of creating and registering the necessary
1106 * Netfilter hooks for XT tables.
1107 */
1108struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn)
1109{
1110 unsigned int hook_mask = table->valid_hooks;
1111 uint8_t i, num_hooks = hweight32(hook_mask);
1112 uint8_t hooknum;
1113 struct nf_hook_ops *ops;
1114 int ret;
1115
1116 ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
1117 if (ops == NULL)
1118 return ERR_PTR(-ENOMEM);
1119
1120 for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
1121 hook_mask >>= 1, ++hooknum) {
1122 if (!(hook_mask & 1))
1123 continue;
1124 ops[i].hook = fn;
1125 ops[i].owner = table->me;
1126 ops[i].pf = table->af;
1127 ops[i].hooknum = hooknum;
1128 ops[i].priority = table->priority;
1129 ++i;
1130 }
1131
1132 ret = nf_register_hooks(ops, num_hooks);
1133 if (ret < 0) {
1134 kfree(ops);
1135 return ERR_PTR(ret);
1136 }
1137
1138 return ops;
1139}
1140EXPORT_SYMBOL_GPL(xt_hook_link);
1141
1142/**
1143 * xt_hook_unlink - remove hooks for a table
1144 * @ops: nf_hook_ops array as returned by nf_hook_link
1145 * @hook_mask: the very same mask that was passed to nf_hook_link
1146 */
1147void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops)
1148{
1149 nf_unregister_hooks(ops, hweight32(table->valid_hooks));
1150 kfree(ops);
1151}
1152EXPORT_SYMBOL_GPL(xt_hook_unlink);
1153
1094int xt_proto_init(struct net *net, u_int8_t af) 1154int xt_proto_init(struct net *net, u_int8_t af)
1095{ 1155{
1096#ifdef CONFIG_PROC_FS 1156#ifdef CONFIG_PROC_FS
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
new file mode 100644
index 00000000000..61c50fa8470
--- /dev/null
+++ b/net/netfilter/xt_CT.c
@@ -0,0 +1,164 @@
1/*
2 * Copyright (c) 2010 Patrick McHardy <kaber@trash.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/module.h>
10#include <linux/skbuff.h>
11#include <linux/selinux.h>
12#include <linux/netfilter_ipv4/ip_tables.h>
13#include <linux/netfilter_ipv6/ip6_tables.h>
14#include <linux/netfilter/x_tables.h>
15#include <linux/netfilter/xt_CT.h>
16#include <net/netfilter/nf_conntrack.h>
17#include <net/netfilter/nf_conntrack_helper.h>
18#include <net/netfilter/nf_conntrack_ecache.h>
19#include <net/netfilter/nf_conntrack_zones.h>
20
21static unsigned int xt_ct_target(struct sk_buff *skb,
22 const struct xt_target_param *par)
23{
24 const struct xt_ct_target_info *info = par->targinfo;
25 struct nf_conn *ct = info->ct;
26
27 /* Previously seen (loopback)? Ignore. */
28 if (skb->nfct != NULL)
29 return XT_CONTINUE;
30
31 atomic_inc(&ct->ct_general.use);
32 skb->nfct = &ct->ct_general;
33 skb->nfctinfo = IP_CT_NEW;
34
35 return XT_CONTINUE;
36}
37
38static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
39{
40 if (par->family == AF_INET) {
41 const struct ipt_entry *e = par->entryinfo;
42
43 if (e->ip.invflags & IPT_INV_PROTO)
44 return 0;
45 return e->ip.proto;
46 } else if (par->family == AF_INET6) {
47 const struct ip6t_entry *e = par->entryinfo;
48
49 if (e->ipv6.invflags & IP6T_INV_PROTO)
50 return 0;
51 return e->ipv6.proto;
52 } else
53 return 0;
54}
55
56static bool xt_ct_tg_check(const struct xt_tgchk_param *par)
57{
58 struct xt_ct_target_info *info = par->targinfo;
59 struct nf_conntrack_tuple t;
60 struct nf_conn_help *help;
61 struct nf_conn *ct;
62 u8 proto;
63
64 if (info->flags & ~XT_CT_NOTRACK)
65 return false;
66
67 if (info->flags & XT_CT_NOTRACK) {
68 ct = &nf_conntrack_untracked;
69 atomic_inc(&ct->ct_general.use);
70 goto out;
71 }
72
73#ifndef CONFIG_NF_CONNTRACK_ZONES
74 if (info->zone)
75 goto err1;
76#endif
77
78 if (nf_ct_l3proto_try_module_get(par->family) < 0)
79 goto err1;
80
81 memset(&t, 0, sizeof(t));
82 ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL);
83 if (IS_ERR(ct))
84 goto err2;
85
86 if ((info->ct_events || info->exp_events) &&
87 !nf_ct_ecache_ext_add(ct, info->ct_events, info->exp_events,
88 GFP_KERNEL))
89 goto err3;
90
91 if (info->helper[0]) {
92 proto = xt_ct_find_proto(par);
93 if (!proto)
94 goto err3;
95
96 help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
97 if (help == NULL)
98 goto err3;
99
100 help->helper = nf_conntrack_helper_try_module_get(info->helper,
101 par->family,
102 proto);
103 if (help->helper == NULL)
104 goto err3;
105 }
106
107 __set_bit(IPS_TEMPLATE_BIT, &ct->status);
108 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
109out:
110 info->ct = ct;
111 return true;
112
113err3:
114 nf_conntrack_free(ct);
115err2:
116 nf_ct_l3proto_module_put(par->family);
117err1:
118 return false;
119}
120
121static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par)
122{
123 struct xt_ct_target_info *info = par->targinfo;
124 struct nf_conn *ct = info->ct;
125 struct nf_conn_help *help;
126
127 if (ct != &nf_conntrack_untracked) {
128 help = nfct_help(ct);
129 if (help)
130 module_put(help->helper->me);
131
132 nf_ct_l3proto_module_put(par->family);
133 }
134 nf_ct_put(info->ct);
135}
136
137static struct xt_target xt_ct_tg __read_mostly = {
138 .name = "CT",
139 .family = NFPROTO_UNSPEC,
140 .targetsize = XT_ALIGN(sizeof(struct xt_ct_target_info)),
141 .checkentry = xt_ct_tg_check,
142 .destroy = xt_ct_tg_destroy,
143 .target = xt_ct_target,
144 .table = "raw",
145 .me = THIS_MODULE,
146};
147
148static int __init xt_ct_tg_init(void)
149{
150 return xt_register_target(&xt_ct_tg);
151}
152
153static void __exit xt_ct_tg_exit(void)
154{
155 xt_unregister_target(&xt_ct_tg);
156}
157
158module_init(xt_ct_tg_init);
159module_exit(xt_ct_tg_exit);
160
161MODULE_LICENSE("GPL");
162MODULE_DESCRIPTION("Xtables: connection tracking target");
163MODULE_ALIAS("ipt_CT");
164MODULE_ALIAS("ip6t_CT");
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index f28f6a5fc02..12dcd7007c3 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -28,6 +28,7 @@ MODULE_ALIAS("ip6t_NFQUEUE");
28MODULE_ALIAS("arpt_NFQUEUE"); 28MODULE_ALIAS("arpt_NFQUEUE");
29 29
30static u32 jhash_initval __read_mostly; 30static u32 jhash_initval __read_mostly;
31static bool rnd_inited __read_mostly;
31 32
32static unsigned int 33static unsigned int
33nfqueue_tg(struct sk_buff *skb, const struct xt_target_param *par) 34nfqueue_tg(struct sk_buff *skb, const struct xt_target_param *par)
@@ -90,6 +91,10 @@ static bool nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
90 const struct xt_NFQ_info_v1 *info = par->targinfo; 91 const struct xt_NFQ_info_v1 *info = par->targinfo;
91 u32 maxid; 92 u32 maxid;
92 93
94 if (unlikely(!rnd_inited)) {
95 get_random_bytes(&jhash_initval, sizeof(jhash_initval));
96 rnd_inited = true;
97 }
93 if (info->queues_total == 0) { 98 if (info->queues_total == 0) {
94 pr_err("NFQUEUE: number of total queues is 0\n"); 99 pr_err("NFQUEUE: number of total queues is 0\n");
95 return false; 100 return false;
@@ -135,7 +140,6 @@ static struct xt_target nfqueue_tg_reg[] __read_mostly = {
135 140
136static int __init nfqueue_tg_init(void) 141static int __init nfqueue_tg_init(void)
137{ 142{
138 get_random_bytes(&jhash_initval, sizeof(jhash_initval));
139 return xt_register_targets(nfqueue_tg_reg, ARRAY_SIZE(nfqueue_tg_reg)); 143 return xt_register_targets(nfqueue_tg_reg, ARRAY_SIZE(nfqueue_tg_reg));
140} 144}
141 145
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index d80b8192e0d..87ae97e5516 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -23,6 +23,7 @@ static DEFINE_MUTEX(xt_rateest_mutex);
23#define RATEEST_HSIZE 16 23#define RATEEST_HSIZE 16
24static struct hlist_head rateest_hash[RATEEST_HSIZE] __read_mostly; 24static struct hlist_head rateest_hash[RATEEST_HSIZE] __read_mostly;
25static unsigned int jhash_rnd __read_mostly; 25static unsigned int jhash_rnd __read_mostly;
26static bool rnd_inited __read_mostly;
26 27
27static unsigned int xt_rateest_hash(const char *name) 28static unsigned int xt_rateest_hash(const char *name)
28{ 29{
@@ -93,6 +94,11 @@ static bool xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
93 struct gnet_estimator est; 94 struct gnet_estimator est;
94 } cfg; 95 } cfg;
95 96
97 if (unlikely(!rnd_inited)) {
98 get_random_bytes(&jhash_rnd, sizeof(jhash_rnd));
99 rnd_inited = true;
100 }
101
96 est = xt_rateest_lookup(info->name); 102 est = xt_rateest_lookup(info->name);
97 if (est) { 103 if (est) {
98 /* 104 /*
@@ -164,7 +170,6 @@ static int __init xt_rateest_tg_init(void)
164 for (i = 0; i < ARRAY_SIZE(rateest_hash); i++) 170 for (i = 0; i < ARRAY_SIZE(rateest_hash); i++)
165 INIT_HLIST_HEAD(&rateest_hash[i]); 171 INIT_HLIST_HEAD(&rateest_hash[i]);
166 172
167 get_random_bytes(&jhash_rnd, sizeof(jhash_rnd));
168 return xt_register_target(&xt_rateest_tg_reg); 173 return xt_register_target(&xt_rateest_tg_reg);
169} 174}
170 175
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index eda64c1cb1e..0e357ac9a2a 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -60,17 +60,9 @@ tcpmss_mangle_packet(struct sk_buff *skb,
60 tcplen = skb->len - tcphoff; 60 tcplen = skb->len - tcphoff;
61 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); 61 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
62 62
63 /* Since it passed flags test in tcp match, we know it is is 63 /* Header cannot be larger than the packet */
64 not a fragment, and has data >= tcp header length. SYN 64 if (tcplen < tcph->doff*4)
65 packets should not contain data: if they did, then we risk
66 running over MTU, sending Frag Needed and breaking things
67 badly. --RR */
68 if (tcplen != tcph->doff*4) {
69 if (net_ratelimit())
70 printk(KERN_ERR "xt_TCPMSS: bad length (%u bytes)\n",
71 skb->len);
72 return -1; 65 return -1;
73 }
74 66
75 if (info->mss == XT_TCPMSS_CLAMP_PMTU) { 67 if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
76 if (dst_mtu(skb_dst(skb)) <= minlen) { 68 if (dst_mtu(skb_dst(skb)) <= minlen) {
@@ -115,6 +107,12 @@ tcpmss_mangle_packet(struct sk_buff *skb,
115 } 107 }
116 } 108 }
117 109
110 /* There is data after the header so the option can't be added
111 without moving it, and doing so may make the SYN packet
112 itself too large. Accept the packet unmodified instead. */
113 if (tcplen > tcph->doff*4)
114 return 0;
115
118 /* 116 /*
119 * MSS Option not found ?! add it.. 117 * MSS Option not found ?! add it..
120 */ 118 */
@@ -241,6 +239,7 @@ static bool tcpmss_tg4_check(const struct xt_tgchk_param *par)
241{ 239{
242 const struct xt_tcpmss_info *info = par->targinfo; 240 const struct xt_tcpmss_info *info = par->targinfo;
243 const struct ipt_entry *e = par->entryinfo; 241 const struct ipt_entry *e = par->entryinfo;
242 const struct xt_entry_match *ematch;
244 243
245 if (info->mss == XT_TCPMSS_CLAMP_PMTU && 244 if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
246 (par->hook_mask & ~((1 << NF_INET_FORWARD) | 245 (par->hook_mask & ~((1 << NF_INET_FORWARD) |
@@ -250,8 +249,9 @@ static bool tcpmss_tg4_check(const struct xt_tgchk_param *par)
250 "FORWARD, OUTPUT and POSTROUTING hooks\n"); 249 "FORWARD, OUTPUT and POSTROUTING hooks\n");
251 return false; 250 return false;
252 } 251 }
253 if (IPT_MATCH_ITERATE(e, find_syn_match)) 252 xt_ematch_foreach(ematch, e)
254 return true; 253 if (find_syn_match(ematch))
254 return true;
255 printk("xt_TCPMSS: Only works on TCP SYN packets\n"); 255 printk("xt_TCPMSS: Only works on TCP SYN packets\n");
256 return false; 256 return false;
257} 257}
@@ -261,6 +261,7 @@ static bool tcpmss_tg6_check(const struct xt_tgchk_param *par)
261{ 261{
262 const struct xt_tcpmss_info *info = par->targinfo; 262 const struct xt_tcpmss_info *info = par->targinfo;
263 const struct ip6t_entry *e = par->entryinfo; 263 const struct ip6t_entry *e = par->entryinfo;
264 const struct xt_entry_match *ematch;
264 265
265 if (info->mss == XT_TCPMSS_CLAMP_PMTU && 266 if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
266 (par->hook_mask & ~((1 << NF_INET_FORWARD) | 267 (par->hook_mask & ~((1 << NF_INET_FORWARD) |
@@ -270,8 +271,9 @@ static bool tcpmss_tg6_check(const struct xt_tgchk_param *par)
270 "FORWARD, OUTPUT and POSTROUTING hooks\n"); 271 "FORWARD, OUTPUT and POSTROUTING hooks\n");
271 return false; 272 return false;
272 } 273 }
273 if (IP6T_MATCH_ITERATE(e, find_syn_match)) 274 xt_ematch_foreach(ematch, e)
274 return true; 275 if (find_syn_match(ematch))
276 return true;
275 printk("xt_TCPMSS: Only works on TCP SYN packets\n"); 277 printk("xt_TCPMSS: Only works on TCP SYN packets\n");
276 return false; 278 return false;
277} 279}
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 38f03f75a63..26997ce90e4 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -28,6 +28,7 @@
28#include <net/netfilter/nf_conntrack.h> 28#include <net/netfilter/nf_conntrack.h>
29#include <net/netfilter/nf_conntrack_core.h> 29#include <net/netfilter/nf_conntrack_core.h>
30#include <net/netfilter/nf_conntrack_tuple.h> 30#include <net/netfilter/nf_conntrack_tuple.h>
31#include <net/netfilter/nf_conntrack_zones.h>
31 32
32/* we will save the tuples of all connections we care about */ 33/* we will save the tuples of all connections we care about */
33struct xt_connlimit_conn { 34struct xt_connlimit_conn {
@@ -40,15 +41,11 @@ struct xt_connlimit_data {
40 spinlock_t lock; 41 spinlock_t lock;
41}; 42};
42 43
43static u_int32_t connlimit_rnd; 44static u_int32_t connlimit_rnd __read_mostly;
44static bool connlimit_rnd_inited; 45static bool connlimit_rnd_inited __read_mostly;
45 46
46static inline unsigned int connlimit_iphash(__be32 addr) 47static inline unsigned int connlimit_iphash(__be32 addr)
47{ 48{
48 if (unlikely(!connlimit_rnd_inited)) {
49 get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
50 connlimit_rnd_inited = true;
51 }
52 return jhash_1word((__force __u32)addr, connlimit_rnd) & 0xFF; 49 return jhash_1word((__force __u32)addr, connlimit_rnd) & 0xFF;
53} 50}
54 51
@@ -59,11 +56,6 @@ connlimit_iphash6(const union nf_inet_addr *addr,
59 union nf_inet_addr res; 56 union nf_inet_addr res;
60 unsigned int i; 57 unsigned int i;
61 58
62 if (unlikely(!connlimit_rnd_inited)) {
63 get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
64 connlimit_rnd_inited = true;
65 }
66
67 for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i) 59 for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i)
68 res.ip6[i] = addr->ip6[i] & mask->ip6[i]; 60 res.ip6[i] = addr->ip6[i] & mask->ip6[i];
69 61
@@ -99,7 +91,8 @@ same_source_net(const union nf_inet_addr *addr,
99 } 91 }
100} 92}
101 93
102static int count_them(struct xt_connlimit_data *data, 94static int count_them(struct net *net,
95 struct xt_connlimit_data *data,
103 const struct nf_conntrack_tuple *tuple, 96 const struct nf_conntrack_tuple *tuple,
104 const union nf_inet_addr *addr, 97 const union nf_inet_addr *addr,
105 const union nf_inet_addr *mask, 98 const union nf_inet_addr *mask,
@@ -122,7 +115,8 @@ static int count_them(struct xt_connlimit_data *data,
122 115
123 /* check the saved connections */ 116 /* check the saved connections */
124 list_for_each_entry_safe(conn, tmp, hash, list) { 117 list_for_each_entry_safe(conn, tmp, hash, list) {
125 found = nf_conntrack_find_get(&init_net, &conn->tuple); 118 found = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE,
119 &conn->tuple);
126 found_ct = NULL; 120 found_ct = NULL;
127 121
128 if (found != NULL) 122 if (found != NULL)
@@ -180,6 +174,7 @@ static int count_them(struct xt_connlimit_data *data,
180static bool 174static bool
181connlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par) 175connlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
182{ 176{
177 struct net *net = dev_net(par->in ? par->in : par->out);
183 const struct xt_connlimit_info *info = par->matchinfo; 178 const struct xt_connlimit_info *info = par->matchinfo;
184 union nf_inet_addr addr; 179 union nf_inet_addr addr;
185 struct nf_conntrack_tuple tuple; 180 struct nf_conntrack_tuple tuple;
@@ -204,7 +199,7 @@ connlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
204 } 199 }
205 200
206 spin_lock_bh(&info->data->lock); 201 spin_lock_bh(&info->data->lock);
207 connections = count_them(info->data, tuple_ptr, &addr, 202 connections = count_them(net, info->data, tuple_ptr, &addr,
208 &info->mask, par->family); 203 &info->mask, par->family);
209 spin_unlock_bh(&info->data->lock); 204 spin_unlock_bh(&info->data->lock);
210 205
@@ -226,6 +221,10 @@ static bool connlimit_mt_check(const struct xt_mtchk_param *par)
226 struct xt_connlimit_info *info = par->matchinfo; 221 struct xt_connlimit_info *info = par->matchinfo;
227 unsigned int i; 222 unsigned int i;
228 223
224 if (unlikely(!connlimit_rnd_inited)) {
225 get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
226 connlimit_rnd_inited = true;
227 }
229 if (nf_ct_l3proto_try_module_get(par->family) < 0) { 228 if (nf_ct_l3proto_try_module_get(par->family) < 0) {
230 printk(KERN_WARNING "cannot load conntrack support for " 229 printk(KERN_WARNING "cannot load conntrack support for "
231 "address family %u\n", par->family); 230 "address family %u\n", par->family);
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index dd16e404424..d952806b646 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -26,6 +26,7 @@
26#endif 26#endif
27 27
28#include <net/net_namespace.h> 28#include <net/net_namespace.h>
29#include <net/netns/generic.h>
29 30
30#include <linux/netfilter/x_tables.h> 31#include <linux/netfilter/x_tables.h>
31#include <linux/netfilter_ipv4/ip_tables.h> 32#include <linux/netfilter_ipv4/ip_tables.h>
@@ -40,9 +41,19 @@ MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
40MODULE_ALIAS("ipt_hashlimit"); 41MODULE_ALIAS("ipt_hashlimit");
41MODULE_ALIAS("ip6t_hashlimit"); 42MODULE_ALIAS("ip6t_hashlimit");
42 43
44struct hashlimit_net {
45 struct hlist_head htables;
46 struct proc_dir_entry *ipt_hashlimit;
47 struct proc_dir_entry *ip6t_hashlimit;
48};
49
50static int hashlimit_net_id;
51static inline struct hashlimit_net *hashlimit_pernet(struct net *net)
52{
53 return net_generic(net, hashlimit_net_id);
54}
55
43/* need to declare this at the top */ 56/* need to declare this at the top */
44static struct proc_dir_entry *hashlimit_procdir4;
45static struct proc_dir_entry *hashlimit_procdir6;
46static const struct file_operations dl_file_ops; 57static const struct file_operations dl_file_ops;
47 58
48/* hash table crap */ 59/* hash table crap */
@@ -79,27 +90,26 @@ struct dsthash_ent {
79 90
80struct xt_hashlimit_htable { 91struct xt_hashlimit_htable {
81 struct hlist_node node; /* global list of all htables */ 92 struct hlist_node node; /* global list of all htables */
82 atomic_t use; 93 int use;
83 u_int8_t family; 94 u_int8_t family;
95 bool rnd_initialized;
84 96
85 struct hashlimit_cfg1 cfg; /* config */ 97 struct hashlimit_cfg1 cfg; /* config */
86 98
87 /* used internally */ 99 /* used internally */
88 spinlock_t lock; /* lock for list_head */ 100 spinlock_t lock; /* lock for list_head */
89 u_int32_t rnd; /* random seed for hash */ 101 u_int32_t rnd; /* random seed for hash */
90 int rnd_initialized;
91 unsigned int count; /* number entries in table */ 102 unsigned int count; /* number entries in table */
92 struct timer_list timer; /* timer for gc */ 103 struct timer_list timer; /* timer for gc */
93 104
94 /* seq_file stuff */ 105 /* seq_file stuff */
95 struct proc_dir_entry *pde; 106 struct proc_dir_entry *pde;
107 struct net *net;
96 108
97 struct hlist_head hash[0]; /* hashtable itself */ 109 struct hlist_head hash[0]; /* hashtable itself */
98}; 110};
99 111
100static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */ 112static DEFINE_MUTEX(hashlimit_mutex); /* protects htables list */
101static DEFINE_MUTEX(hlimit_mutex); /* additional checkentry protection */
102static HLIST_HEAD(hashlimit_htables);
103static struct kmem_cache *hashlimit_cachep __read_mostly; 113static struct kmem_cache *hashlimit_cachep __read_mostly;
104 114
105static inline bool dst_cmp(const struct dsthash_ent *ent, 115static inline bool dst_cmp(const struct dsthash_ent *ent,
@@ -150,7 +160,7 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht,
150 * the first hashtable entry */ 160 * the first hashtable entry */
151 if (!ht->rnd_initialized) { 161 if (!ht->rnd_initialized) {
152 get_random_bytes(&ht->rnd, sizeof(ht->rnd)); 162 get_random_bytes(&ht->rnd, sizeof(ht->rnd));
153 ht->rnd_initialized = 1; 163 ht->rnd_initialized = true;
154 } 164 }
155 165
156 if (ht->cfg.max && ht->count >= ht->cfg.max) { 166 if (ht->cfg.max && ht->count >= ht->cfg.max) {
@@ -185,8 +195,9 @@ dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
185} 195}
186static void htable_gc(unsigned long htlong); 196static void htable_gc(unsigned long htlong);
187 197
188static int htable_create_v0(struct xt_hashlimit_info *minfo, u_int8_t family) 198static int htable_create_v0(struct net *net, struct xt_hashlimit_info *minfo, u_int8_t family)
189{ 199{
200 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
190 struct xt_hashlimit_htable *hinfo; 201 struct xt_hashlimit_htable *hinfo;
191 unsigned int size; 202 unsigned int size;
192 unsigned int i; 203 unsigned int i;
@@ -232,33 +243,34 @@ static int htable_create_v0(struct xt_hashlimit_info *minfo, u_int8_t family)
232 for (i = 0; i < hinfo->cfg.size; i++) 243 for (i = 0; i < hinfo->cfg.size; i++)
233 INIT_HLIST_HEAD(&hinfo->hash[i]); 244 INIT_HLIST_HEAD(&hinfo->hash[i]);
234 245
235 atomic_set(&hinfo->use, 1); 246 hinfo->use = 1;
236 hinfo->count = 0; 247 hinfo->count = 0;
237 hinfo->family = family; 248 hinfo->family = family;
238 hinfo->rnd_initialized = 0; 249 hinfo->rnd_initialized = false;
239 spin_lock_init(&hinfo->lock); 250 spin_lock_init(&hinfo->lock);
240 hinfo->pde = proc_create_data(minfo->name, 0, 251 hinfo->pde = proc_create_data(minfo->name, 0,
241 (family == NFPROTO_IPV4) ? 252 (family == NFPROTO_IPV4) ?
242 hashlimit_procdir4 : hashlimit_procdir6, 253 hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit,
243 &dl_file_ops, hinfo); 254 &dl_file_ops, hinfo);
244 if (!hinfo->pde) { 255 if (!hinfo->pde) {
245 vfree(hinfo); 256 vfree(hinfo);
246 return -1; 257 return -1;
247 } 258 }
259 hinfo->net = net;
248 260
249 setup_timer(&hinfo->timer, htable_gc, (unsigned long )hinfo); 261 setup_timer(&hinfo->timer, htable_gc, (unsigned long )hinfo);
250 hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval); 262 hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
251 add_timer(&hinfo->timer); 263 add_timer(&hinfo->timer);
252 264
253 spin_lock_bh(&hashlimit_lock); 265 hlist_add_head(&hinfo->node, &hashlimit_net->htables);
254 hlist_add_head(&hinfo->node, &hashlimit_htables);
255 spin_unlock_bh(&hashlimit_lock);
256 266
257 return 0; 267 return 0;
258} 268}
259 269
260static int htable_create(struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family) 270static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
271 u_int8_t family)
261{ 272{
273 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
262 struct xt_hashlimit_htable *hinfo; 274 struct xt_hashlimit_htable *hinfo;
263 unsigned int size; 275 unsigned int size;
264 unsigned int i; 276 unsigned int i;
@@ -293,28 +305,27 @@ static int htable_create(struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family)
293 for (i = 0; i < hinfo->cfg.size; i++) 305 for (i = 0; i < hinfo->cfg.size; i++)
294 INIT_HLIST_HEAD(&hinfo->hash[i]); 306 INIT_HLIST_HEAD(&hinfo->hash[i]);
295 307
296 atomic_set(&hinfo->use, 1); 308 hinfo->use = 1;
297 hinfo->count = 0; 309 hinfo->count = 0;
298 hinfo->family = family; 310 hinfo->family = family;
299 hinfo->rnd_initialized = 0; 311 hinfo->rnd_initialized = false;
300 spin_lock_init(&hinfo->lock); 312 spin_lock_init(&hinfo->lock);
301 313
302 hinfo->pde = proc_create_data(minfo->name, 0, 314 hinfo->pde = proc_create_data(minfo->name, 0,
303 (family == NFPROTO_IPV4) ? 315 (family == NFPROTO_IPV4) ?
304 hashlimit_procdir4 : hashlimit_procdir6, 316 hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit,
305 &dl_file_ops, hinfo); 317 &dl_file_ops, hinfo);
306 if (hinfo->pde == NULL) { 318 if (hinfo->pde == NULL) {
307 vfree(hinfo); 319 vfree(hinfo);
308 return -1; 320 return -1;
309 } 321 }
322 hinfo->net = net;
310 323
311 setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo); 324 setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo);
312 hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval); 325 hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
313 add_timer(&hinfo->timer); 326 add_timer(&hinfo->timer);
314 327
315 spin_lock_bh(&hashlimit_lock); 328 hlist_add_head(&hinfo->node, &hashlimit_net->htables);
316 hlist_add_head(&hinfo->node, &hashlimit_htables);
317 spin_unlock_bh(&hashlimit_lock);
318 329
319 return 0; 330 return 0;
320} 331}
@@ -364,43 +375,46 @@ static void htable_gc(unsigned long htlong)
364 375
365static void htable_destroy(struct xt_hashlimit_htable *hinfo) 376static void htable_destroy(struct xt_hashlimit_htable *hinfo)
366{ 377{
378 struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net);
379 struct proc_dir_entry *parent;
380
367 del_timer_sync(&hinfo->timer); 381 del_timer_sync(&hinfo->timer);
368 382
369 /* remove proc entry */ 383 if (hinfo->family == NFPROTO_IPV4)
370 remove_proc_entry(hinfo->pde->name, 384 parent = hashlimit_net->ipt_hashlimit;
371 hinfo->family == NFPROTO_IPV4 ? hashlimit_procdir4 : 385 else
372 hashlimit_procdir6); 386 parent = hashlimit_net->ip6t_hashlimit;
387 remove_proc_entry(hinfo->pde->name, parent);
373 htable_selective_cleanup(hinfo, select_all); 388 htable_selective_cleanup(hinfo, select_all);
374 vfree(hinfo); 389 vfree(hinfo);
375} 390}
376 391
377static struct xt_hashlimit_htable *htable_find_get(const char *name, 392static struct xt_hashlimit_htable *htable_find_get(struct net *net,
393 const char *name,
378 u_int8_t family) 394 u_int8_t family)
379{ 395{
396 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
380 struct xt_hashlimit_htable *hinfo; 397 struct xt_hashlimit_htable *hinfo;
381 struct hlist_node *pos; 398 struct hlist_node *pos;
382 399
383 spin_lock_bh(&hashlimit_lock); 400 hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node) {
384 hlist_for_each_entry(hinfo, pos, &hashlimit_htables, node) {
385 if (!strcmp(name, hinfo->pde->name) && 401 if (!strcmp(name, hinfo->pde->name) &&
386 hinfo->family == family) { 402 hinfo->family == family) {
387 atomic_inc(&hinfo->use); 403 hinfo->use++;
388 spin_unlock_bh(&hashlimit_lock);
389 return hinfo; 404 return hinfo;
390 } 405 }
391 } 406 }
392 spin_unlock_bh(&hashlimit_lock);
393 return NULL; 407 return NULL;
394} 408}
395 409
396static void htable_put(struct xt_hashlimit_htable *hinfo) 410static void htable_put(struct xt_hashlimit_htable *hinfo)
397{ 411{
398 if (atomic_dec_and_test(&hinfo->use)) { 412 mutex_lock(&hashlimit_mutex);
399 spin_lock_bh(&hashlimit_lock); 413 if (--hinfo->use == 0) {
400 hlist_del(&hinfo->node); 414 hlist_del(&hinfo->node);
401 spin_unlock_bh(&hashlimit_lock);
402 htable_destroy(hinfo); 415 htable_destroy(hinfo);
403 } 416 }
417 mutex_unlock(&hashlimit_mutex);
404} 418}
405 419
406/* The algorithm used is the Simple Token Bucket Filter (TBF) 420/* The algorithm used is the Simple Token Bucket Filter (TBF)
@@ -665,6 +679,7 @@ hashlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
665 679
666static bool hashlimit_mt_check_v0(const struct xt_mtchk_param *par) 680static bool hashlimit_mt_check_v0(const struct xt_mtchk_param *par)
667{ 681{
682 struct net *net = par->net;
668 struct xt_hashlimit_info *r = par->matchinfo; 683 struct xt_hashlimit_info *r = par->matchinfo;
669 684
670 /* Check for overflow. */ 685 /* Check for overflow. */
@@ -687,25 +702,20 @@ static bool hashlimit_mt_check_v0(const struct xt_mtchk_param *par)
687 if (r->name[sizeof(r->name) - 1] != '\0') 702 if (r->name[sizeof(r->name) - 1] != '\0')
688 return false; 703 return false;
689 704
690 /* This is the best we've got: We cannot release and re-grab lock, 705 mutex_lock(&hashlimit_mutex);
691 * since checkentry() is called before x_tables.c grabs xt_mutex. 706 r->hinfo = htable_find_get(net, r->name, par->match->family);
692 * We also cannot grab the hashtable spinlock, since htable_create will 707 if (!r->hinfo && htable_create_v0(net, r, par->match->family) != 0) {
693 * call vmalloc, and that can sleep. And we cannot just re-search 708 mutex_unlock(&hashlimit_mutex);
694 * the list of htable's in htable_create(), since then we would
695 * create duplicate proc files. -HW */
696 mutex_lock(&hlimit_mutex);
697 r->hinfo = htable_find_get(r->name, par->match->family);
698 if (!r->hinfo && htable_create_v0(r, par->match->family) != 0) {
699 mutex_unlock(&hlimit_mutex);
700 return false; 709 return false;
701 } 710 }
702 mutex_unlock(&hlimit_mutex); 711 mutex_unlock(&hashlimit_mutex);
703 712
704 return true; 713 return true;
705} 714}
706 715
707static bool hashlimit_mt_check(const struct xt_mtchk_param *par) 716static bool hashlimit_mt_check(const struct xt_mtchk_param *par)
708{ 717{
718 struct net *net = par->net;
709 struct xt_hashlimit_mtinfo1 *info = par->matchinfo; 719 struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
710 720
711 /* Check for overflow. */ 721 /* Check for overflow. */
@@ -728,19 +738,13 @@ static bool hashlimit_mt_check(const struct xt_mtchk_param *par)
728 return false; 738 return false;
729 } 739 }
730 740
731 /* This is the best we've got: We cannot release and re-grab lock, 741 mutex_lock(&hashlimit_mutex);
732 * since checkentry() is called before x_tables.c grabs xt_mutex. 742 info->hinfo = htable_find_get(net, info->name, par->match->family);
733 * We also cannot grab the hashtable spinlock, since htable_create will 743 if (!info->hinfo && htable_create(net, info, par->match->family) != 0) {
734 * call vmalloc, and that can sleep. And we cannot just re-search 744 mutex_unlock(&hashlimit_mutex);
735 * the list of htable's in htable_create(), since then we would
736 * create duplicate proc files. -HW */
737 mutex_lock(&hlimit_mutex);
738 info->hinfo = htable_find_get(info->name, par->match->family);
739 if (!info->hinfo && htable_create(info, par->match->family) != 0) {
740 mutex_unlock(&hlimit_mutex);
741 return false; 745 return false;
742 } 746 }
743 mutex_unlock(&hlimit_mutex); 747 mutex_unlock(&hashlimit_mutex);
744 return true; 748 return true;
745} 749}
746 750
@@ -767,7 +771,7 @@ struct compat_xt_hashlimit_info {
767 compat_uptr_t master; 771 compat_uptr_t master;
768}; 772};
769 773
770static void hashlimit_mt_compat_from_user(void *dst, void *src) 774static void hashlimit_mt_compat_from_user(void *dst, const void *src)
771{ 775{
772 int off = offsetof(struct compat_xt_hashlimit_info, hinfo); 776 int off = offsetof(struct compat_xt_hashlimit_info, hinfo);
773 777
@@ -775,7 +779,7 @@ static void hashlimit_mt_compat_from_user(void *dst, void *src)
775 memset(dst + off, 0, sizeof(struct compat_xt_hashlimit_info) - off); 779 memset(dst + off, 0, sizeof(struct compat_xt_hashlimit_info) - off);
776} 780}
777 781
778static int hashlimit_mt_compat_to_user(void __user *dst, void *src) 782static int hashlimit_mt_compat_to_user(void __user *dst, const void *src)
779{ 783{
780 int off = offsetof(struct compat_xt_hashlimit_info, hinfo); 784 int off = offsetof(struct compat_xt_hashlimit_info, hinfo);
781 785
@@ -841,8 +845,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
841static void *dl_seq_start(struct seq_file *s, loff_t *pos) 845static void *dl_seq_start(struct seq_file *s, loff_t *pos)
842 __acquires(htable->lock) 846 __acquires(htable->lock)
843{ 847{
844 struct proc_dir_entry *pde = s->private; 848 struct xt_hashlimit_htable *htable = s->private;
845 struct xt_hashlimit_htable *htable = pde->data;
846 unsigned int *bucket; 849 unsigned int *bucket;
847 850
848 spin_lock_bh(&htable->lock); 851 spin_lock_bh(&htable->lock);
@@ -859,8 +862,7 @@ static void *dl_seq_start(struct seq_file *s, loff_t *pos)
859 862
860static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos) 863static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
861{ 864{
862 struct proc_dir_entry *pde = s->private; 865 struct xt_hashlimit_htable *htable = s->private;
863 struct xt_hashlimit_htable *htable = pde->data;
864 unsigned int *bucket = (unsigned int *)v; 866 unsigned int *bucket = (unsigned int *)v;
865 867
866 *pos = ++(*bucket); 868 *pos = ++(*bucket);
@@ -874,8 +876,7 @@ static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
874static void dl_seq_stop(struct seq_file *s, void *v) 876static void dl_seq_stop(struct seq_file *s, void *v)
875 __releases(htable->lock) 877 __releases(htable->lock)
876{ 878{
877 struct proc_dir_entry *pde = s->private; 879 struct xt_hashlimit_htable *htable = s->private;
878 struct xt_hashlimit_htable *htable = pde->data;
879 unsigned int *bucket = (unsigned int *)v; 880 unsigned int *bucket = (unsigned int *)v;
880 881
881 kfree(bucket); 882 kfree(bucket);
@@ -917,8 +918,7 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
917 918
918static int dl_seq_show(struct seq_file *s, void *v) 919static int dl_seq_show(struct seq_file *s, void *v)
919{ 920{
920 struct proc_dir_entry *pde = s->private; 921 struct xt_hashlimit_htable *htable = s->private;
921 struct xt_hashlimit_htable *htable = pde->data;
922 unsigned int *bucket = (unsigned int *)v; 922 unsigned int *bucket = (unsigned int *)v;
923 struct dsthash_ent *ent; 923 struct dsthash_ent *ent;
924 struct hlist_node *pos; 924 struct hlist_node *pos;
@@ -944,7 +944,7 @@ static int dl_proc_open(struct inode *inode, struct file *file)
944 944
945 if (!ret) { 945 if (!ret) {
946 struct seq_file *sf = file->private_data; 946 struct seq_file *sf = file->private_data;
947 sf->private = PDE(inode); 947 sf->private = PDE(inode)->data;
948 } 948 }
949 return ret; 949 return ret;
950} 950}
@@ -957,10 +957,61 @@ static const struct file_operations dl_file_ops = {
957 .release = seq_release 957 .release = seq_release
958}; 958};
959 959
960static int __net_init hashlimit_proc_net_init(struct net *net)
961{
962 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
963
964 hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net);
965 if (!hashlimit_net->ipt_hashlimit)
966 return -ENOMEM;
967#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
968 hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net);
969 if (!hashlimit_net->ip6t_hashlimit) {
970 proc_net_remove(net, "ipt_hashlimit");
971 return -ENOMEM;
972 }
973#endif
974 return 0;
975}
976
977static void __net_exit hashlimit_proc_net_exit(struct net *net)
978{
979 proc_net_remove(net, "ipt_hashlimit");
980#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
981 proc_net_remove(net, "ip6t_hashlimit");
982#endif
983}
984
985static int __net_init hashlimit_net_init(struct net *net)
986{
987 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
988
989 INIT_HLIST_HEAD(&hashlimit_net->htables);
990 return hashlimit_proc_net_init(net);
991}
992
993static void __net_exit hashlimit_net_exit(struct net *net)
994{
995 struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
996
997 BUG_ON(!hlist_empty(&hashlimit_net->htables));
998 hashlimit_proc_net_exit(net);
999}
1000
1001static struct pernet_operations hashlimit_net_ops = {
1002 .init = hashlimit_net_init,
1003 .exit = hashlimit_net_exit,
1004 .id = &hashlimit_net_id,
1005 .size = sizeof(struct hashlimit_net),
1006};
1007
960static int __init hashlimit_mt_init(void) 1008static int __init hashlimit_mt_init(void)
961{ 1009{
962 int err; 1010 int err;
963 1011
1012 err = register_pernet_subsys(&hashlimit_net_ops);
1013 if (err < 0)
1014 return err;
964 err = xt_register_matches(hashlimit_mt_reg, 1015 err = xt_register_matches(hashlimit_mt_reg,
965 ARRAY_SIZE(hashlimit_mt_reg)); 1016 ARRAY_SIZE(hashlimit_mt_reg));
966 if (err < 0) 1017 if (err < 0)
@@ -974,41 +1025,21 @@ static int __init hashlimit_mt_init(void)
974 printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n"); 1025 printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n");
975 goto err2; 1026 goto err2;
976 } 1027 }
977 hashlimit_procdir4 = proc_mkdir("ipt_hashlimit", init_net.proc_net); 1028 return 0;
978 if (!hashlimit_procdir4) { 1029
979 printk(KERN_ERR "xt_hashlimit: unable to create proc dir "
980 "entry\n");
981 goto err3;
982 }
983 err = 0;
984#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
985 hashlimit_procdir6 = proc_mkdir("ip6t_hashlimit", init_net.proc_net);
986 if (!hashlimit_procdir6) {
987 printk(KERN_ERR "xt_hashlimit: unable to create proc dir "
988 "entry\n");
989 err = -ENOMEM;
990 }
991#endif
992 if (!err)
993 return 0;
994 remove_proc_entry("ipt_hashlimit", init_net.proc_net);
995err3:
996 kmem_cache_destroy(hashlimit_cachep);
997err2: 1030err2:
998 xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg)); 1031 xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
999err1: 1032err1:
1033 unregister_pernet_subsys(&hashlimit_net_ops);
1000 return err; 1034 return err;
1001 1035
1002} 1036}
1003 1037
1004static void __exit hashlimit_mt_exit(void) 1038static void __exit hashlimit_mt_exit(void)
1005{ 1039{
1006 remove_proc_entry("ipt_hashlimit", init_net.proc_net);
1007#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
1008 remove_proc_entry("ip6t_hashlimit", init_net.proc_net);
1009#endif
1010 kmem_cache_destroy(hashlimit_cachep); 1040 kmem_cache_destroy(hashlimit_cachep);
1011 xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg)); 1041 xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
1042 unregister_pernet_subsys(&hashlimit_net_ops);
1012} 1043}
1013 1044
1014module_init(hashlimit_mt_init); 1045module_init(hashlimit_mt_init);
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index 2773be6a71d..a0ca5339af4 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -148,7 +148,7 @@ struct compat_xt_rateinfo {
148 148
149/* To keep the full "prev" timestamp, the upper 32 bits are stored in the 149/* To keep the full "prev" timestamp, the upper 32 bits are stored in the
150 * master pointer, which does not need to be preserved. */ 150 * master pointer, which does not need to be preserved. */
151static void limit_mt_compat_from_user(void *dst, void *src) 151static void limit_mt_compat_from_user(void *dst, const void *src)
152{ 152{
153 const struct compat_xt_rateinfo *cm = src; 153 const struct compat_xt_rateinfo *cm = src;
154 struct xt_rateinfo m = { 154 struct xt_rateinfo m = {
@@ -162,7 +162,7 @@ static void limit_mt_compat_from_user(void *dst, void *src)
162 memcpy(dst, &m, sizeof(m)); 162 memcpy(dst, &m, sizeof(m));
163} 163}
164 164
165static int limit_mt_compat_to_user(void __user *dst, void *src) 165static int limit_mt_compat_to_user(void __user *dst, const void *src)
166{ 166{
167 const struct xt_rateinfo *m = src; 167 const struct xt_rateinfo *m = src;
168 struct compat_xt_rateinfo cm = { 168 struct compat_xt_rateinfo cm = {
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index 4d1a41bbd5d..4169e200588 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -334,7 +334,7 @@ static bool xt_osf_match_packet(const struct sk_buff *skb,
334 if (info->flags & XT_OSF_LOG) 334 if (info->flags & XT_OSF_LOG)
335 nf_log_packet(p->family, p->hooknum, skb, 335 nf_log_packet(p->family, p->hooknum, skb,
336 p->in, p->out, NULL, 336 p->in, p->out, NULL,
337 "%s [%s:%s] : %pi4:%d -> %pi4:%d hops=%d\n", 337 "%s [%s:%s] : %pI4:%d -> %pI4:%d hops=%d\n",
338 f->genre, f->version, f->subtype, 338 f->genre, f->version, f->subtype,
339 &ip->saddr, ntohs(tcp->source), 339 &ip->saddr, ntohs(tcp->source),
340 &ip->daddr, ntohs(tcp->dest), 340 &ip->daddr, ntohs(tcp->dest),
@@ -349,7 +349,7 @@ static bool xt_osf_match_packet(const struct sk_buff *skb,
349 349
350 if (!fcount && (info->flags & XT_OSF_LOG)) 350 if (!fcount && (info->flags & XT_OSF_LOG))
351 nf_log_packet(p->family, p->hooknum, skb, p->in, p->out, NULL, 351 nf_log_packet(p->family, p->hooknum, skb, p->in, p->out, NULL,
352 "Remote OS is not known: %pi4:%u -> %pi4:%u\n", 352 "Remote OS is not known: %pI4:%u -> %pI4:%u\n",
353 &ip->saddr, ntohs(tcp->source), 353 &ip->saddr, ntohs(tcp->source),
354 &ip->daddr, ntohs(tcp->dest)); 354 &ip->daddr, ntohs(tcp->dest));
355 355
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index fc70a49c0af..7073dbb8100 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -28,6 +28,7 @@
28#include <linux/skbuff.h> 28#include <linux/skbuff.h>
29#include <linux/inet.h> 29#include <linux/inet.h>
30#include <net/net_namespace.h> 30#include <net/net_namespace.h>
31#include <net/netns/generic.h>
31 32
32#include <linux/netfilter/x_tables.h> 33#include <linux/netfilter/x_tables.h>
33#include <linux/netfilter/xt_recent.h> 34#include <linux/netfilter/xt_recent.h>
@@ -52,7 +53,7 @@ module_param(ip_list_perms, uint, 0400);
52module_param(ip_list_uid, uint, 0400); 53module_param(ip_list_uid, uint, 0400);
53module_param(ip_list_gid, uint, 0400); 54module_param(ip_list_gid, uint, 0400);
54MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list"); 55MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list");
55MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP to remember (max. 255)"); 56MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)");
56MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs"); 57MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs");
57MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files"); 58MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files");
58MODULE_PARM_DESC(ip_list_uid,"owner of /proc/net/xt_recent/* files"); 59MODULE_PARM_DESC(ip_list_uid,"owner of /proc/net/xt_recent/* files");
@@ -78,37 +79,40 @@ struct recent_table {
78 struct list_head iphash[0]; 79 struct list_head iphash[0];
79}; 80};
80 81
81static LIST_HEAD(tables); 82struct recent_net {
83 struct list_head tables;
84#ifdef CONFIG_PROC_FS
85 struct proc_dir_entry *xt_recent;
86#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
87 struct proc_dir_entry *ipt_recent;
88#endif
89#endif
90};
91
92static int recent_net_id;
93static inline struct recent_net *recent_pernet(struct net *net)
94{
95 return net_generic(net, recent_net_id);
96}
97
82static DEFINE_SPINLOCK(recent_lock); 98static DEFINE_SPINLOCK(recent_lock);
83static DEFINE_MUTEX(recent_mutex); 99static DEFINE_MUTEX(recent_mutex);
84 100
85#ifdef CONFIG_PROC_FS 101#ifdef CONFIG_PROC_FS
86#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
87static struct proc_dir_entry *proc_old_dir;
88#endif
89static struct proc_dir_entry *recent_proc_dir;
90static const struct file_operations recent_old_fops, recent_mt_fops; 102static const struct file_operations recent_old_fops, recent_mt_fops;
91#endif 103#endif
92 104
93static u_int32_t hash_rnd; 105static u_int32_t hash_rnd __read_mostly;
94static bool hash_rnd_initted; 106static bool hash_rnd_inited __read_mostly;
95 107
96static unsigned int recent_entry_hash4(const union nf_inet_addr *addr) 108static inline unsigned int recent_entry_hash4(const union nf_inet_addr *addr)
97{ 109{
98 if (!hash_rnd_initted) {
99 get_random_bytes(&hash_rnd, sizeof(hash_rnd));
100 hash_rnd_initted = true;
101 }
102 return jhash_1word((__force u32)addr->ip, hash_rnd) & 110 return jhash_1word((__force u32)addr->ip, hash_rnd) &
103 (ip_list_hash_size - 1); 111 (ip_list_hash_size - 1);
104} 112}
105 113
106static unsigned int recent_entry_hash6(const union nf_inet_addr *addr) 114static inline unsigned int recent_entry_hash6(const union nf_inet_addr *addr)
107{ 115{
108 if (!hash_rnd_initted) {
109 get_random_bytes(&hash_rnd, sizeof(hash_rnd));
110 hash_rnd_initted = true;
111 }
112 return jhash2((u32 *)addr->ip6, ARRAY_SIZE(addr->ip6), hash_rnd) & 116 return jhash2((u32 *)addr->ip6, ARRAY_SIZE(addr->ip6), hash_rnd) &
113 (ip_list_hash_size - 1); 117 (ip_list_hash_size - 1);
114} 118}
@@ -173,18 +177,19 @@ recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr,
173 177
174static void recent_entry_update(struct recent_table *t, struct recent_entry *e) 178static void recent_entry_update(struct recent_table *t, struct recent_entry *e)
175{ 179{
180 e->index %= ip_pkt_list_tot;
176 e->stamps[e->index++] = jiffies; 181 e->stamps[e->index++] = jiffies;
177 if (e->index > e->nstamps) 182 if (e->index > e->nstamps)
178 e->nstamps = e->index; 183 e->nstamps = e->index;
179 e->index %= ip_pkt_list_tot;
180 list_move_tail(&e->lru_list, &t->lru_list); 184 list_move_tail(&e->lru_list, &t->lru_list);
181} 185}
182 186
183static struct recent_table *recent_table_lookup(const char *name) 187static struct recent_table *recent_table_lookup(struct recent_net *recent_net,
188 const char *name)
184{ 189{
185 struct recent_table *t; 190 struct recent_table *t;
186 191
187 list_for_each_entry(t, &tables, list) 192 list_for_each_entry(t, &recent_net->tables, list)
188 if (!strcmp(t->name, name)) 193 if (!strcmp(t->name, name))
189 return t; 194 return t;
190 return NULL; 195 return NULL;
@@ -203,6 +208,8 @@ static void recent_table_flush(struct recent_table *t)
203static bool 208static bool
204recent_mt(const struct sk_buff *skb, const struct xt_match_param *par) 209recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
205{ 210{
211 struct net *net = dev_net(par->in ? par->in : par->out);
212 struct recent_net *recent_net = recent_pernet(net);
206 const struct xt_recent_mtinfo *info = par->matchinfo; 213 const struct xt_recent_mtinfo *info = par->matchinfo;
207 struct recent_table *t; 214 struct recent_table *t;
208 struct recent_entry *e; 215 struct recent_entry *e;
@@ -235,7 +242,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
235 ttl++; 242 ttl++;
236 243
237 spin_lock_bh(&recent_lock); 244 spin_lock_bh(&recent_lock);
238 t = recent_table_lookup(info->name); 245 t = recent_table_lookup(recent_net, info->name);
239 e = recent_entry_lookup(t, &addr, par->match->family, 246 e = recent_entry_lookup(t, &addr, par->match->family,
240 (info->check_set & XT_RECENT_TTL) ? ttl : 0); 247 (info->check_set & XT_RECENT_TTL) ? ttl : 0);
241 if (e == NULL) { 248 if (e == NULL) {
@@ -260,7 +267,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
260 for (i = 0; i < e->nstamps; i++) { 267 for (i = 0; i < e->nstamps; i++) {
261 if (info->seconds && time_after(time, e->stamps[i])) 268 if (info->seconds && time_after(time, e->stamps[i]))
262 continue; 269 continue;
263 if (++hits >= info->hit_count) { 270 if (info->hit_count && ++hits >= info->hit_count) {
264 ret = !ret; 271 ret = !ret;
265 break; 272 break;
266 } 273 }
@@ -279,6 +286,7 @@ out:
279 286
280static bool recent_mt_check(const struct xt_mtchk_param *par) 287static bool recent_mt_check(const struct xt_mtchk_param *par)
281{ 288{
289 struct recent_net *recent_net = recent_pernet(par->net);
282 const struct xt_recent_mtinfo *info = par->matchinfo; 290 const struct xt_recent_mtinfo *info = par->matchinfo;
283 struct recent_table *t; 291 struct recent_table *t;
284#ifdef CONFIG_PROC_FS 292#ifdef CONFIG_PROC_FS
@@ -287,6 +295,10 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
287 unsigned i; 295 unsigned i;
288 bool ret = false; 296 bool ret = false;
289 297
298 if (unlikely(!hash_rnd_inited)) {
299 get_random_bytes(&hash_rnd, sizeof(hash_rnd));
300 hash_rnd_inited = true;
301 }
290 if (hweight8(info->check_set & 302 if (hweight8(info->check_set &
291 (XT_RECENT_SET | XT_RECENT_REMOVE | 303 (XT_RECENT_SET | XT_RECENT_REMOVE |
292 XT_RECENT_CHECK | XT_RECENT_UPDATE)) != 1) 304 XT_RECENT_CHECK | XT_RECENT_UPDATE)) != 1)
@@ -294,14 +306,18 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
294 if ((info->check_set & (XT_RECENT_SET | XT_RECENT_REMOVE)) && 306 if ((info->check_set & (XT_RECENT_SET | XT_RECENT_REMOVE)) &&
295 (info->seconds || info->hit_count)) 307 (info->seconds || info->hit_count))
296 return false; 308 return false;
297 if (info->hit_count > ip_pkt_list_tot) 309 if (info->hit_count > ip_pkt_list_tot) {
310 pr_info(KBUILD_MODNAME ": hitcount (%u) is larger than "
311 "packets to be remembered (%u)\n",
312 info->hit_count, ip_pkt_list_tot);
298 return false; 313 return false;
314 }
299 if (info->name[0] == '\0' || 315 if (info->name[0] == '\0' ||
300 strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN) 316 strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN)
301 return false; 317 return false;
302 318
303 mutex_lock(&recent_mutex); 319 mutex_lock(&recent_mutex);
304 t = recent_table_lookup(info->name); 320 t = recent_table_lookup(recent_net, info->name);
305 if (t != NULL) { 321 if (t != NULL) {
306 t->refcnt++; 322 t->refcnt++;
307 ret = true; 323 ret = true;
@@ -318,7 +334,7 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
318 for (i = 0; i < ip_list_hash_size; i++) 334 for (i = 0; i < ip_list_hash_size; i++)
319 INIT_LIST_HEAD(&t->iphash[i]); 335 INIT_LIST_HEAD(&t->iphash[i]);
320#ifdef CONFIG_PROC_FS 336#ifdef CONFIG_PROC_FS
321 pde = proc_create_data(t->name, ip_list_perms, recent_proc_dir, 337 pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent,
322 &recent_mt_fops, t); 338 &recent_mt_fops, t);
323 if (pde == NULL) { 339 if (pde == NULL) {
324 kfree(t); 340 kfree(t);
@@ -327,10 +343,10 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
327 pde->uid = ip_list_uid; 343 pde->uid = ip_list_uid;
328 pde->gid = ip_list_gid; 344 pde->gid = ip_list_gid;
329#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT 345#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
330 pde = proc_create_data(t->name, ip_list_perms, proc_old_dir, 346 pde = proc_create_data(t->name, ip_list_perms, recent_net->ipt_recent,
331 &recent_old_fops, t); 347 &recent_old_fops, t);
332 if (pde == NULL) { 348 if (pde == NULL) {
333 remove_proc_entry(t->name, proc_old_dir); 349 remove_proc_entry(t->name, recent_net->xt_recent);
334 kfree(t); 350 kfree(t);
335 goto out; 351 goto out;
336 } 352 }
@@ -339,7 +355,7 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
339#endif 355#endif
340#endif 356#endif
341 spin_lock_bh(&recent_lock); 357 spin_lock_bh(&recent_lock);
342 list_add_tail(&t->list, &tables); 358 list_add_tail(&t->list, &recent_net->tables);
343 spin_unlock_bh(&recent_lock); 359 spin_unlock_bh(&recent_lock);
344 ret = true; 360 ret = true;
345out: 361out:
@@ -349,20 +365,21 @@ out:
349 365
350static void recent_mt_destroy(const struct xt_mtdtor_param *par) 366static void recent_mt_destroy(const struct xt_mtdtor_param *par)
351{ 367{
368 struct recent_net *recent_net = recent_pernet(par->net);
352 const struct xt_recent_mtinfo *info = par->matchinfo; 369 const struct xt_recent_mtinfo *info = par->matchinfo;
353 struct recent_table *t; 370 struct recent_table *t;
354 371
355 mutex_lock(&recent_mutex); 372 mutex_lock(&recent_mutex);
356 t = recent_table_lookup(info->name); 373 t = recent_table_lookup(recent_net, info->name);
357 if (--t->refcnt == 0) { 374 if (--t->refcnt == 0) {
358 spin_lock_bh(&recent_lock); 375 spin_lock_bh(&recent_lock);
359 list_del(&t->list); 376 list_del(&t->list);
360 spin_unlock_bh(&recent_lock); 377 spin_unlock_bh(&recent_lock);
361#ifdef CONFIG_PROC_FS 378#ifdef CONFIG_PROC_FS
362#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT 379#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
363 remove_proc_entry(t->name, proc_old_dir); 380 remove_proc_entry(t->name, recent_net->ipt_recent);
364#endif 381#endif
365 remove_proc_entry(t->name, recent_proc_dir); 382 remove_proc_entry(t->name, recent_net->xt_recent);
366#endif 383#endif
367 recent_table_flush(t); 384 recent_table_flush(t);
368 kfree(t); 385 kfree(t);
@@ -611,8 +628,65 @@ static const struct file_operations recent_mt_fops = {
611 .release = seq_release_private, 628 .release = seq_release_private,
612 .owner = THIS_MODULE, 629 .owner = THIS_MODULE,
613}; 630};
631
632static int __net_init recent_proc_net_init(struct net *net)
633{
634 struct recent_net *recent_net = recent_pernet(net);
635
636 recent_net->xt_recent = proc_mkdir("xt_recent", net->proc_net);
637 if (!recent_net->xt_recent)
638 return -ENOMEM;
639#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
640 recent_net->ipt_recent = proc_mkdir("ipt_recent", net->proc_net);
641 if (!recent_net->ipt_recent) {
642 proc_net_remove(net, "xt_recent");
643 return -ENOMEM;
644 }
645#endif
646 return 0;
647}
648
649static void __net_exit recent_proc_net_exit(struct net *net)
650{
651#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
652 proc_net_remove(net, "ipt_recent");
653#endif
654 proc_net_remove(net, "xt_recent");
655}
656#else
657static inline int recent_proc_net_init(struct net *net)
658{
659 return 0;
660}
661
662static inline void recent_proc_net_exit(struct net *net)
663{
664}
614#endif /* CONFIG_PROC_FS */ 665#endif /* CONFIG_PROC_FS */
615 666
667static int __net_init recent_net_init(struct net *net)
668{
669 struct recent_net *recent_net = recent_pernet(net);
670
671 INIT_LIST_HEAD(&recent_net->tables);
672 return recent_proc_net_init(net);
673}
674
675static void __net_exit recent_net_exit(struct net *net)
676{
677 struct recent_net *recent_net = recent_pernet(net);
678
679 BUG_ON(!list_empty(&recent_net->tables));
680 recent_proc_net_exit(net);
681}
682
683static struct pernet_operations recent_net_ops = {
684 .init = recent_net_init,
685 .exit = recent_net_exit,
686 .id = &recent_net_id,
687 .size = sizeof(struct recent_net),
688};
689
616static struct xt_match recent_mt_reg[] __read_mostly = { 690static struct xt_match recent_mt_reg[] __read_mostly = {
617 { 691 {
618 .name = "recent", 692 .name = "recent",
@@ -644,39 +718,19 @@ static int __init recent_mt_init(void)
644 return -EINVAL; 718 return -EINVAL;
645 ip_list_hash_size = 1 << fls(ip_list_tot); 719 ip_list_hash_size = 1 << fls(ip_list_tot);
646 720
647 err = xt_register_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg)); 721 err = register_pernet_subsys(&recent_net_ops);
648#ifdef CONFIG_PROC_FS
649 if (err) 722 if (err)
650 return err; 723 return err;
651 recent_proc_dir = proc_mkdir("xt_recent", init_net.proc_net); 724 err = xt_register_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg));
652 if (recent_proc_dir == NULL) { 725 if (err)
653 xt_unregister_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg)); 726 unregister_pernet_subsys(&recent_net_ops);
654 err = -ENOMEM;
655 }
656#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
657 if (err < 0)
658 return err;
659 proc_old_dir = proc_mkdir("ipt_recent", init_net.proc_net);
660 if (proc_old_dir == NULL) {
661 remove_proc_entry("xt_recent", init_net.proc_net);
662 xt_unregister_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg));
663 err = -ENOMEM;
664 }
665#endif
666#endif
667 return err; 727 return err;
668} 728}
669 729
670static void __exit recent_mt_exit(void) 730static void __exit recent_mt_exit(void)
671{ 731{
672 BUG_ON(!list_empty(&tables));
673 xt_unregister_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg)); 732 xt_unregister_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg));
674#ifdef CONFIG_PROC_FS 733 unregister_pernet_subsys(&recent_net_ops);
675#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
676 remove_proc_entry("ipt_recent", init_net.proc_net);
677#endif
678 remove_proc_entry("xt_recent", init_net.proc_net);
679#endif
680} 734}
681 735
682module_init(recent_mt_init); 736module_init(recent_mt_init);
diff --git a/net/netfilter/xt_repldata.h b/net/netfilter/xt_repldata.h
new file mode 100644
index 00000000000..6efe4e5a81c
--- /dev/null
+++ b/net/netfilter/xt_repldata.h
@@ -0,0 +1,35 @@
1/*
2 * Today's hack: quantum tunneling in structs
3 *
4 * 'entries' and 'term' are never anywhere referenced by word in code. In fact,
5 * they serve as the hanging-off data accessed through repl.data[].
6 */
7
8#define xt_alloc_initial_table(type, typ2) ({ \
9 unsigned int hook_mask = info->valid_hooks; \
10 unsigned int nhooks = hweight32(hook_mask); \
11 unsigned int bytes = 0, hooknum = 0, i = 0; \
12 struct { \
13 struct type##_replace repl; \
14 struct type##_standard entries[nhooks]; \
15 struct type##_error term; \
16 } *tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); \
17 if (tbl == NULL) \
18 return NULL; \
19 strncpy(tbl->repl.name, info->name, sizeof(tbl->repl.name)); \
20 tbl->term = (struct type##_error)typ2##_ERROR_INIT; \
21 tbl->repl.valid_hooks = hook_mask; \
22 tbl->repl.num_entries = nhooks + 1; \
23 tbl->repl.size = nhooks * sizeof(struct type##_standard) + \
24 sizeof(struct type##_error); \
25 for (; hook_mask != 0; hook_mask >>= 1, ++hooknum) { \
26 if (!(hook_mask & 1)) \
27 continue; \
28 tbl->repl.hook_entry[hooknum] = bytes; \
29 tbl->repl.underflow[hooknum] = bytes; \
30 tbl->entries[i++] = (struct type##_standard) \
31 typ2##_STANDARD_INIT(NF_ACCEPT); \
32 bytes += sizeof(struct type##_standard); \
33 } \
34 tbl; \
35})
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index c5d9f97ef21..0bfeaab88ef 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -315,7 +315,6 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
315 entry_old = netlbl_domhsh_search_def(entry->domain); 315 entry_old = netlbl_domhsh_search_def(entry->domain);
316 if (entry_old == NULL) { 316 if (entry_old == NULL) {
317 entry->valid = 1; 317 entry->valid = 1;
318 INIT_RCU_HEAD(&entry->rcu);
319 318
320 if (entry->domain != NULL) { 319 if (entry->domain != NULL) {
321 u32 bkt = netlbl_domhsh_hash(entry->domain); 320 u32 bkt = netlbl_domhsh_hash(entry->domain);
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 98ed22ee2ff..852d9d7976b 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -327,7 +327,6 @@ static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface,
327 entry->list.addr = addr->s_addr & mask->s_addr; 327 entry->list.addr = addr->s_addr & mask->s_addr;
328 entry->list.mask = mask->s_addr; 328 entry->list.mask = mask->s_addr;
329 entry->list.valid = 1; 329 entry->list.valid = 1;
330 INIT_RCU_HEAD(&entry->rcu);
331 entry->secid = secid; 330 entry->secid = secid;
332 331
333 spin_lock(&netlbl_unlhsh_lock); 332 spin_lock(&netlbl_unlhsh_lock);
@@ -373,7 +372,6 @@ static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface,
373 entry->list.addr.s6_addr32[3] &= mask->s6_addr32[3]; 372 entry->list.addr.s6_addr32[3] &= mask->s6_addr32[3];
374 ipv6_addr_copy(&entry->list.mask, mask); 373 ipv6_addr_copy(&entry->list.mask, mask);
375 entry->list.valid = 1; 374 entry->list.valid = 1;
376 INIT_RCU_HEAD(&entry->rcu);
377 entry->secid = secid; 375 entry->secid = secid;
378 376
379 spin_lock(&netlbl_unlhsh_lock); 377 spin_lock(&netlbl_unlhsh_lock);
@@ -410,7 +408,6 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex)
410 INIT_LIST_HEAD(&iface->addr4_list); 408 INIT_LIST_HEAD(&iface->addr4_list);
411 INIT_LIST_HEAD(&iface->addr6_list); 409 INIT_LIST_HEAD(&iface->addr6_list);
412 iface->valid = 1; 410 iface->valid = 1;
413 INIT_RCU_HEAD(&iface->rcu);
414 411
415 spin_lock(&netlbl_unlhsh_lock); 412 spin_lock(&netlbl_unlhsh_lock);
416 if (ifindex > 0) { 413 if (ifindex > 0) {
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index d07ecda0a92..a4b6e148c5d 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -681,9 +681,7 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
681 int chains_to_skip = cb->args[0]; 681 int chains_to_skip = cb->args[0];
682 int fams_to_skip = cb->args[1]; 682 int fams_to_skip = cb->args[1];
683 683
684 for (i = 0; i < GENL_FAM_TAB_SIZE; i++) { 684 for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
685 if (i < chains_to_skip)
686 continue;
687 n = 0; 685 n = 0;
688 list_for_each_entry(rt, genl_family_chain(i), family_list) { 686 list_for_each_entry(rt, genl_family_chain(i), family_list) {
689 if (!rt->netnsok && !net_eq(net, &init_net)) 687 if (!rt->netnsok && !net_eq(net, &init_net))
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 71604c6613b..a249127020a 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1267,28 +1267,13 @@ static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1267 1267
1268static void *nr_info_start(struct seq_file *seq, loff_t *pos) 1268static void *nr_info_start(struct seq_file *seq, loff_t *pos)
1269{ 1269{
1270 struct sock *s;
1271 struct hlist_node *node;
1272 int i = 1;
1273
1274 spin_lock_bh(&nr_list_lock); 1270 spin_lock_bh(&nr_list_lock);
1275 if (*pos == 0) 1271 return seq_hlist_start_head(&nr_list, *pos);
1276 return SEQ_START_TOKEN;
1277
1278 sk_for_each(s, node, &nr_list) {
1279 if (i == *pos)
1280 return s;
1281 ++i;
1282 }
1283 return NULL;
1284} 1272}
1285 1273
1286static void *nr_info_next(struct seq_file *seq, void *v, loff_t *pos) 1274static void *nr_info_next(struct seq_file *seq, void *v, loff_t *pos)
1287{ 1275{
1288 ++*pos; 1276 return seq_hlist_next(v, &nr_list, pos);
1289
1290 return (v == SEQ_START_TOKEN) ? sk_head(&nr_list)
1291 : sk_next((struct sock *)v);
1292} 1277}
1293 1278
1294static void nr_info_stop(struct seq_file *seq, void *v) 1279static void nr_info_stop(struct seq_file *seq, void *v)
@@ -1298,7 +1283,7 @@ static void nr_info_stop(struct seq_file *seq, void *v)
1298 1283
1299static int nr_info_show(struct seq_file *seq, void *v) 1284static int nr_info_show(struct seq_file *seq, void *v)
1300{ 1285{
1301 struct sock *s = v; 1286 struct sock *s = sk_entry(v);
1302 struct net_device *dev; 1287 struct net_device *dev;
1303 struct nr_sock *nr; 1288 struct nr_sock *nr;
1304 const char *devname; 1289 const char *devname;
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index e2e2d33cafd..5cc648012f5 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -863,33 +863,13 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
863 863
864static void *nr_node_start(struct seq_file *seq, loff_t *pos) 864static void *nr_node_start(struct seq_file *seq, loff_t *pos)
865{ 865{
866 struct nr_node *nr_node;
867 struct hlist_node *node;
868 int i = 1;
869
870 spin_lock_bh(&nr_node_list_lock); 866 spin_lock_bh(&nr_node_list_lock);
871 if (*pos == 0) 867 return seq_hlist_start_head(&nr_node_list, *pos);
872 return SEQ_START_TOKEN;
873
874 nr_node_for_each(nr_node, node, &nr_node_list) {
875 if (i == *pos)
876 return nr_node;
877 ++i;
878 }
879
880 return NULL;
881} 868}
882 869
883static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos) 870static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
884{ 871{
885 struct hlist_node *node; 872 return seq_hlist_next(v, &nr_node_list, pos);
886 ++*pos;
887
888 node = (v == SEQ_START_TOKEN)
889 ? nr_node_list.first
890 : ((struct nr_node *)v)->node_node.next;
891
892 return hlist_entry(node, struct nr_node, node_node);
893} 873}
894 874
895static void nr_node_stop(struct seq_file *seq, void *v) 875static void nr_node_stop(struct seq_file *seq, void *v)
@@ -906,7 +886,9 @@ static int nr_node_show(struct seq_file *seq, void *v)
906 seq_puts(seq, 886 seq_puts(seq,
907 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n"); 887 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
908 else { 888 else {
909 struct nr_node *nr_node = v; 889 struct nr_node *nr_node = hlist_entry(v, struct nr_node,
890 node_node);
891
910 nr_node_lock(nr_node); 892 nr_node_lock(nr_node);
911 seq_printf(seq, "%-9s %-7s %d %d", 893 seq_printf(seq, "%-9s %-7s %d %d",
912 ax2asc(buf, &nr_node->callsign), 894 ax2asc(buf, &nr_node->callsign),
@@ -949,31 +931,13 @@ const struct file_operations nr_nodes_fops = {
949 931
950static void *nr_neigh_start(struct seq_file *seq, loff_t *pos) 932static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
951{ 933{
952 struct nr_neigh *nr_neigh;
953 struct hlist_node *node;
954 int i = 1;
955
956 spin_lock_bh(&nr_neigh_list_lock); 934 spin_lock_bh(&nr_neigh_list_lock);
957 if (*pos == 0) 935 return seq_hlist_start_head(&nr_neigh_list, *pos);
958 return SEQ_START_TOKEN;
959
960 nr_neigh_for_each(nr_neigh, node, &nr_neigh_list) {
961 if (i == *pos)
962 return nr_neigh;
963 }
964 return NULL;
965} 936}
966 937
967static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos) 938static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
968{ 939{
969 struct hlist_node *node; 940 return seq_hlist_next(v, &nr_neigh_list, pos);
970 ++*pos;
971
972 node = (v == SEQ_START_TOKEN)
973 ? nr_neigh_list.first
974 : ((struct nr_neigh *)v)->neigh_node.next;
975
976 return hlist_entry(node, struct nr_neigh, neigh_node);
977} 941}
978 942
979static void nr_neigh_stop(struct seq_file *seq, void *v) 943static void nr_neigh_stop(struct seq_file *seq, void *v)
@@ -989,8 +953,9 @@ static int nr_neigh_show(struct seq_file *seq, void *v)
989 if (v == SEQ_START_TOKEN) 953 if (v == SEQ_START_TOKEN)
990 seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n"); 954 seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n");
991 else { 955 else {
992 struct nr_neigh *nr_neigh = v; 956 struct nr_neigh *nr_neigh;
993 957
958 nr_neigh = hlist_entry(v, struct nr_neigh, neigh_node);
994 seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d", 959 seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d",
995 nr_neigh->number, 960 nr_neigh->number,
996 ax2asc(buf, &nr_neigh->callsign), 961 ax2asc(buf, &nr_neigh->callsign),
diff --git a/net/packet/Kconfig b/net/packet/Kconfig
index 34ff93ff894..0060e3b396b 100644
--- a/net/packet/Kconfig
+++ b/net/packet/Kconfig
@@ -14,13 +14,3 @@ config PACKET
14 be called af_packet. 14 be called af_packet.
15 15
16 If unsure, say Y. 16 If unsure, say Y.
17
18config PACKET_MMAP
19 bool "Packet socket: mmapped IO"
20 depends on PACKET
21 help
22 If you say Y here, the Packet protocol driver will use an IO
23 mechanism that results in faster communication.
24
25 If unsure, say N.
26
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f126d18dbdc..2f0369367ee 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -80,6 +80,7 @@
80#include <linux/init.h> 80#include <linux/init.h>
81#include <linux/mutex.h> 81#include <linux/mutex.h>
82#include <linux/if_vlan.h> 82#include <linux/if_vlan.h>
83#include <linux/virtio_net.h>
83 84
84#ifdef CONFIG_INET 85#ifdef CONFIG_INET
85#include <net/inet_common.h> 86#include <net/inet_common.h>
@@ -156,7 +157,6 @@ struct packet_mreq_max {
156 unsigned char mr_address[MAX_ADDR_LEN]; 157 unsigned char mr_address[MAX_ADDR_LEN];
157}; 158};
158 159
159#ifdef CONFIG_PACKET_MMAP
160static int packet_set_ring(struct sock *sk, struct tpacket_req *req, 160static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
161 int closing, int tx_ring); 161 int closing, int tx_ring);
162 162
@@ -176,7 +176,6 @@ struct packet_ring_buffer {
176 176
177struct packet_sock; 177struct packet_sock;
178static int tpacket_snd(struct packet_sock *po, struct msghdr *msg); 178static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
179#endif
180 179
181static void packet_flush_mclist(struct sock *sk); 180static void packet_flush_mclist(struct sock *sk);
182 181
@@ -184,26 +183,23 @@ struct packet_sock {
184 /* struct sock has to be the first member of packet_sock */ 183 /* struct sock has to be the first member of packet_sock */
185 struct sock sk; 184 struct sock sk;
186 struct tpacket_stats stats; 185 struct tpacket_stats stats;
187#ifdef CONFIG_PACKET_MMAP
188 struct packet_ring_buffer rx_ring; 186 struct packet_ring_buffer rx_ring;
189 struct packet_ring_buffer tx_ring; 187 struct packet_ring_buffer tx_ring;
190 int copy_thresh; 188 int copy_thresh;
191#endif
192 spinlock_t bind_lock; 189 spinlock_t bind_lock;
193 struct mutex pg_vec_lock; 190 struct mutex pg_vec_lock;
194 unsigned int running:1, /* prot_hook is attached*/ 191 unsigned int running:1, /* prot_hook is attached*/
195 auxdata:1, 192 auxdata:1,
196 origdev:1; 193 origdev:1,
194 has_vnet_hdr:1;
197 int ifindex; /* bound device */ 195 int ifindex; /* bound device */
198 __be16 num; 196 __be16 num;
199 struct packet_mclist *mclist; 197 struct packet_mclist *mclist;
200#ifdef CONFIG_PACKET_MMAP
201 atomic_t mapped; 198 atomic_t mapped;
202 enum tpacket_versions tp_version; 199 enum tpacket_versions tp_version;
203 unsigned int tp_hdrlen; 200 unsigned int tp_hdrlen;
204 unsigned int tp_reserve; 201 unsigned int tp_reserve;
205 unsigned int tp_loss:1; 202 unsigned int tp_loss:1;
206#endif
207 struct packet_type prot_hook ____cacheline_aligned_in_smp; 203 struct packet_type prot_hook ____cacheline_aligned_in_smp;
208}; 204};
209 205
@@ -217,8 +213,6 @@ struct packet_skb_cb {
217 213
218#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) 214#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
219 215
220#ifdef CONFIG_PACKET_MMAP
221
222static void __packet_set_status(struct packet_sock *po, void *frame, int status) 216static void __packet_set_status(struct packet_sock *po, void *frame, int status)
223{ 217{
224 union { 218 union {
@@ -313,8 +307,6 @@ static inline void packet_increment_head(struct packet_ring_buffer *buff)
313 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0; 307 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
314} 308}
315 309
316#endif
317
318static inline struct packet_sock *pkt_sk(struct sock *sk) 310static inline struct packet_sock *pkt_sk(struct sock *sk)
319{ 311{
320 return (struct packet_sock *)sk; 312 return (struct packet_sock *)sk;
@@ -638,7 +630,6 @@ drop:
638 return 0; 630 return 0;
639} 631}
640 632
641#ifdef CONFIG_PACKET_MMAP
642static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, 633static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
643 struct packet_type *pt, struct net_device *orig_dev) 634 struct packet_type *pt, struct net_device *orig_dev)
644{ 635{
@@ -1054,7 +1045,30 @@ out:
1054 mutex_unlock(&po->pg_vec_lock); 1045 mutex_unlock(&po->pg_vec_lock);
1055 return err; 1046 return err;
1056} 1047}
1057#endif 1048
1049static inline struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
1050 size_t reserve, size_t len,
1051 size_t linear, int noblock,
1052 int *err)
1053{
1054 struct sk_buff *skb;
1055
1056 /* Under a page? Don't bother with paged skb. */
1057 if (prepad + len < PAGE_SIZE || !linear)
1058 linear = len;
1059
1060 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
1061 err);
1062 if (!skb)
1063 return NULL;
1064
1065 skb_reserve(skb, reserve);
1066 skb_put(skb, linear);
1067 skb->data_len = len - linear;
1068 skb->len += len - linear;
1069
1070 return skb;
1071}
1058 1072
1059static int packet_snd(struct socket *sock, 1073static int packet_snd(struct socket *sock,
1060 struct msghdr *msg, size_t len) 1074 struct msghdr *msg, size_t len)
@@ -1066,14 +1080,17 @@ static int packet_snd(struct socket *sock,
1066 __be16 proto; 1080 __be16 proto;
1067 unsigned char *addr; 1081 unsigned char *addr;
1068 int ifindex, err, reserve = 0; 1082 int ifindex, err, reserve = 0;
1083 struct virtio_net_hdr vnet_hdr = { 0 };
1084 int offset = 0;
1085 int vnet_hdr_len;
1086 struct packet_sock *po = pkt_sk(sk);
1087 unsigned short gso_type = 0;
1069 1088
1070 /* 1089 /*
1071 * Get and verify the address. 1090 * Get and verify the address.
1072 */ 1091 */
1073 1092
1074 if (saddr == NULL) { 1093 if (saddr == NULL) {
1075 struct packet_sock *po = pkt_sk(sk);
1076
1077 ifindex = po->ifindex; 1094 ifindex = po->ifindex;
1078 proto = po->num; 1095 proto = po->num;
1079 addr = NULL; 1096 addr = NULL;
@@ -1100,25 +1117,74 @@ static int packet_snd(struct socket *sock,
1100 if (!(dev->flags & IFF_UP)) 1117 if (!(dev->flags & IFF_UP))
1101 goto out_unlock; 1118 goto out_unlock;
1102 1119
1120 if (po->has_vnet_hdr) {
1121 vnet_hdr_len = sizeof(vnet_hdr);
1122
1123 err = -EINVAL;
1124 if (len < vnet_hdr_len)
1125 goto out_unlock;
1126
1127 len -= vnet_hdr_len;
1128
1129 err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
1130 vnet_hdr_len);
1131 if (err < 0)
1132 goto out_unlock;
1133
1134 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1135 (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
1136 vnet_hdr.hdr_len))
1137 vnet_hdr.hdr_len = vnet_hdr.csum_start +
1138 vnet_hdr.csum_offset + 2;
1139
1140 err = -EINVAL;
1141 if (vnet_hdr.hdr_len > len)
1142 goto out_unlock;
1143
1144 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1145 switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1146 case VIRTIO_NET_HDR_GSO_TCPV4:
1147 gso_type = SKB_GSO_TCPV4;
1148 break;
1149 case VIRTIO_NET_HDR_GSO_TCPV6:
1150 gso_type = SKB_GSO_TCPV6;
1151 break;
1152 case VIRTIO_NET_HDR_GSO_UDP:
1153 gso_type = SKB_GSO_UDP;
1154 break;
1155 default:
1156 goto out_unlock;
1157 }
1158
1159 if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
1160 gso_type |= SKB_GSO_TCP_ECN;
1161
1162 if (vnet_hdr.gso_size == 0)
1163 goto out_unlock;
1164
1165 }
1166 }
1167
1103 err = -EMSGSIZE; 1168 err = -EMSGSIZE;
1104 if (len > dev->mtu+reserve) 1169 if (!gso_type && (len > dev->mtu+reserve))
1105 goto out_unlock; 1170 goto out_unlock;
1106 1171
1107 skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev), 1172 err = -ENOBUFS;
1108 msg->msg_flags & MSG_DONTWAIT, &err); 1173 skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
1174 LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
1175 msg->msg_flags & MSG_DONTWAIT, &err);
1109 if (skb == NULL) 1176 if (skb == NULL)
1110 goto out_unlock; 1177 goto out_unlock;
1111 1178
1112 skb_reserve(skb, LL_RESERVED_SPACE(dev)); 1179 skb_set_network_header(skb, reserve);
1113 skb_reset_network_header(skb);
1114 1180
1115 err = -EINVAL; 1181 err = -EINVAL;
1116 if (sock->type == SOCK_DGRAM && 1182 if (sock->type == SOCK_DGRAM &&
1117 dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len) < 0) 1183 (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
1118 goto out_free; 1184 goto out_free;
1119 1185
1120 /* Returns -EFAULT on error */ 1186 /* Returns -EFAULT on error */
1121 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); 1187 err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
1122 if (err) 1188 if (err)
1123 goto out_free; 1189 goto out_free;
1124 1190
@@ -1127,6 +1193,25 @@ static int packet_snd(struct socket *sock,
1127 skb->priority = sk->sk_priority; 1193 skb->priority = sk->sk_priority;
1128 skb->mark = sk->sk_mark; 1194 skb->mark = sk->sk_mark;
1129 1195
1196 if (po->has_vnet_hdr) {
1197 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1198 if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
1199 vnet_hdr.csum_offset)) {
1200 err = -EINVAL;
1201 goto out_free;
1202 }
1203 }
1204
1205 skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
1206 skb_shinfo(skb)->gso_type = gso_type;
1207
1208 /* Header must be checked, and gso_segs computed. */
1209 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1210 skb_shinfo(skb)->gso_segs = 0;
1211
1212 len += vnet_hdr_len;
1213 }
1214
1130 /* 1215 /*
1131 * Now send it 1216 * Now send it
1132 */ 1217 */
@@ -1151,13 +1236,11 @@ out:
1151static int packet_sendmsg(struct kiocb *iocb, struct socket *sock, 1236static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
1152 struct msghdr *msg, size_t len) 1237 struct msghdr *msg, size_t len)
1153{ 1238{
1154#ifdef CONFIG_PACKET_MMAP
1155 struct sock *sk = sock->sk; 1239 struct sock *sk = sock->sk;
1156 struct packet_sock *po = pkt_sk(sk); 1240 struct packet_sock *po = pkt_sk(sk);
1157 if (po->tx_ring.pg_vec) 1241 if (po->tx_ring.pg_vec)
1158 return tpacket_snd(po, msg); 1242 return tpacket_snd(po, msg);
1159 else 1243 else
1160#endif
1161 return packet_snd(sock, msg, len); 1244 return packet_snd(sock, msg, len);
1162} 1245}
1163 1246
@@ -1171,9 +1254,7 @@ static int packet_release(struct socket *sock)
1171 struct sock *sk = sock->sk; 1254 struct sock *sk = sock->sk;
1172 struct packet_sock *po; 1255 struct packet_sock *po;
1173 struct net *net; 1256 struct net *net;
1174#ifdef CONFIG_PACKET_MMAP
1175 struct tpacket_req req; 1257 struct tpacket_req req;
1176#endif
1177 1258
1178 if (!sk) 1259 if (!sk)
1179 return 0; 1260 return 0;
@@ -1181,28 +1262,25 @@ static int packet_release(struct socket *sock)
1181 net = sock_net(sk); 1262 net = sock_net(sk);
1182 po = pkt_sk(sk); 1263 po = pkt_sk(sk);
1183 1264
1184 write_lock_bh(&net->packet.sklist_lock); 1265 spin_lock_bh(&net->packet.sklist_lock);
1185 sk_del_node_init(sk); 1266 sk_del_node_init_rcu(sk);
1186 sock_prot_inuse_add(net, sk->sk_prot, -1); 1267 sock_prot_inuse_add(net, sk->sk_prot, -1);
1187 write_unlock_bh(&net->packet.sklist_lock); 1268 spin_unlock_bh(&net->packet.sklist_lock);
1188
1189 /*
1190 * Unhook packet receive handler.
1191 */
1192 1269
1270 spin_lock(&po->bind_lock);
1193 if (po->running) { 1271 if (po->running) {
1194 /* 1272 /*
1195 * Remove the protocol hook 1273 * Remove from protocol table
1196 */ 1274 */
1197 dev_remove_pack(&po->prot_hook);
1198 po->running = 0; 1275 po->running = 0;
1199 po->num = 0; 1276 po->num = 0;
1277 __dev_remove_pack(&po->prot_hook);
1200 __sock_put(sk); 1278 __sock_put(sk);
1201 } 1279 }
1280 spin_unlock(&po->bind_lock);
1202 1281
1203 packet_flush_mclist(sk); 1282 packet_flush_mclist(sk);
1204 1283
1205#ifdef CONFIG_PACKET_MMAP
1206 memset(&req, 0, sizeof(req)); 1284 memset(&req, 0, sizeof(req));
1207 1285
1208 if (po->rx_ring.pg_vec) 1286 if (po->rx_ring.pg_vec)
@@ -1210,12 +1288,11 @@ static int packet_release(struct socket *sock)
1210 1288
1211 if (po->tx_ring.pg_vec) 1289 if (po->tx_ring.pg_vec)
1212 packet_set_ring(sk, &req, 1, 1); 1290 packet_set_ring(sk, &req, 1, 1);
1213#endif
1214 1291
1292 synchronize_net();
1215 /* 1293 /*
1216 * Now the socket is dead. No more input will appear. 1294 * Now the socket is dead. No more input will appear.
1217 */ 1295 */
1218
1219 sock_orphan(sk); 1296 sock_orphan(sk);
1220 sock->sk = NULL; 1297 sock->sk = NULL;
1221 1298
@@ -1399,10 +1476,11 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
1399 po->running = 1; 1476 po->running = 1;
1400 } 1477 }
1401 1478
1402 write_lock_bh(&net->packet.sklist_lock); 1479 spin_lock_bh(&net->packet.sklist_lock);
1403 sk_add_node(sk, &net->packet.sklist); 1480 sk_add_node_rcu(sk, &net->packet.sklist);
1404 sock_prot_inuse_add(net, &packet_proto, 1); 1481 sock_prot_inuse_add(net, &packet_proto, 1);
1405 write_unlock_bh(&net->packet.sklist_lock); 1482 spin_unlock_bh(&net->packet.sklist_lock);
1483
1406 return 0; 1484 return 0;
1407out: 1485out:
1408 return err; 1486 return err;
@@ -1420,6 +1498,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1420 struct sk_buff *skb; 1498 struct sk_buff *skb;
1421 int copied, err; 1499 int copied, err;
1422 struct sockaddr_ll *sll; 1500 struct sockaddr_ll *sll;
1501 int vnet_hdr_len = 0;
1423 1502
1424 err = -EINVAL; 1503 err = -EINVAL;
1425 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT)) 1504 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
@@ -1451,6 +1530,48 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1451 if (skb == NULL) 1530 if (skb == NULL)
1452 goto out; 1531 goto out;
1453 1532
1533 if (pkt_sk(sk)->has_vnet_hdr) {
1534 struct virtio_net_hdr vnet_hdr = { 0 };
1535
1536 err = -EINVAL;
1537 vnet_hdr_len = sizeof(vnet_hdr);
1538 if ((len -= vnet_hdr_len) < 0)
1539 goto out_free;
1540
1541 if (skb_is_gso(skb)) {
1542 struct skb_shared_info *sinfo = skb_shinfo(skb);
1543
1544 /* This is a hint as to how much should be linear. */
1545 vnet_hdr.hdr_len = skb_headlen(skb);
1546 vnet_hdr.gso_size = sinfo->gso_size;
1547 if (sinfo->gso_type & SKB_GSO_TCPV4)
1548 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1549 else if (sinfo->gso_type & SKB_GSO_TCPV6)
1550 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1551 else if (sinfo->gso_type & SKB_GSO_UDP)
1552 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
1553 else if (sinfo->gso_type & SKB_GSO_FCOE)
1554 goto out_free;
1555 else
1556 BUG();
1557 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
1558 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
1559 } else
1560 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
1561
1562 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1563 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1564 vnet_hdr.csum_start = skb->csum_start -
1565 skb_headroom(skb);
1566 vnet_hdr.csum_offset = skb->csum_offset;
1567 } /* else everything is zero */
1568
1569 err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
1570 vnet_hdr_len);
1571 if (err < 0)
1572 goto out_free;
1573 }
1574
1454 /* 1575 /*
1455 * If the address length field is there to be filled in, we fill 1576 * If the address length field is there to be filled in, we fill
1456 * it in now. 1577 * it in now.
@@ -1502,7 +1623,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1502 * Free or return the buffer as appropriate. Again this 1623 * Free or return the buffer as appropriate. Again this
1503 * hides all the races and re-entrancy issues from us. 1624 * hides all the races and re-entrancy issues from us.
1504 */ 1625 */
1505 err = (flags&MSG_TRUNC) ? skb->len : copied; 1626 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
1506 1627
1507out_free: 1628out_free:
1508 skb_free_datagram(sk, skb); 1629 skb_free_datagram(sk, skb);
@@ -1732,7 +1853,6 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1732 return ret; 1853 return ret;
1733 } 1854 }
1734 1855
1735#ifdef CONFIG_PACKET_MMAP
1736 case PACKET_RX_RING: 1856 case PACKET_RX_RING:
1737 case PACKET_TX_RING: 1857 case PACKET_TX_RING:
1738 { 1858 {
@@ -1740,6 +1860,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1740 1860
1741 if (optlen < sizeof(req)) 1861 if (optlen < sizeof(req))
1742 return -EINVAL; 1862 return -EINVAL;
1863 if (pkt_sk(sk)->has_vnet_hdr)
1864 return -EINVAL;
1743 if (copy_from_user(&req, optval, sizeof(req))) 1865 if (copy_from_user(&req, optval, sizeof(req)))
1744 return -EFAULT; 1866 return -EFAULT;
1745 return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING); 1867 return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
@@ -1801,7 +1923,6 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1801 po->tp_loss = !!val; 1923 po->tp_loss = !!val;
1802 return 0; 1924 return 0;
1803 } 1925 }
1804#endif
1805 case PACKET_AUXDATA: 1926 case PACKET_AUXDATA:
1806 { 1927 {
1807 int val; 1928 int val;
@@ -1826,6 +1947,22 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
1826 po->origdev = !!val; 1947 po->origdev = !!val;
1827 return 0; 1948 return 0;
1828 } 1949 }
1950 case PACKET_VNET_HDR:
1951 {
1952 int val;
1953
1954 if (sock->type != SOCK_RAW)
1955 return -EINVAL;
1956 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
1957 return -EBUSY;
1958 if (optlen < sizeof(val))
1959 return -EINVAL;
1960 if (copy_from_user(&val, optval, sizeof(val)))
1961 return -EFAULT;
1962
1963 po->has_vnet_hdr = !!val;
1964 return 0;
1965 }
1829 default: 1966 default:
1830 return -ENOPROTOOPT; 1967 return -ENOPROTOOPT;
1831 } 1968 }
@@ -1876,7 +2013,13 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
1876 2013
1877 data = &val; 2014 data = &val;
1878 break; 2015 break;
1879#ifdef CONFIG_PACKET_MMAP 2016 case PACKET_VNET_HDR:
2017 if (len > sizeof(int))
2018 len = sizeof(int);
2019 val = po->has_vnet_hdr;
2020
2021 data = &val;
2022 break;
1880 case PACKET_VERSION: 2023 case PACKET_VERSION:
1881 if (len > sizeof(int)) 2024 if (len > sizeof(int))
1882 len = sizeof(int); 2025 len = sizeof(int);
@@ -1912,7 +2055,6 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
1912 val = po->tp_loss; 2055 val = po->tp_loss;
1913 data = &val; 2056 data = &val;
1914 break; 2057 break;
1915#endif
1916 default: 2058 default:
1917 return -ENOPROTOOPT; 2059 return -ENOPROTOOPT;
1918 } 2060 }
@@ -1932,8 +2074,8 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void
1932 struct net_device *dev = data; 2074 struct net_device *dev = data;
1933 struct net *net = dev_net(dev); 2075 struct net *net = dev_net(dev);
1934 2076
1935 read_lock(&net->packet.sklist_lock); 2077 rcu_read_lock();
1936 sk_for_each(sk, node, &net->packet.sklist) { 2078 sk_for_each_rcu(sk, node, &net->packet.sklist) {
1937 struct packet_sock *po = pkt_sk(sk); 2079 struct packet_sock *po = pkt_sk(sk);
1938 2080
1939 switch (msg) { 2081 switch (msg) {
@@ -1961,18 +2103,19 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void
1961 } 2103 }
1962 break; 2104 break;
1963 case NETDEV_UP: 2105 case NETDEV_UP:
1964 spin_lock(&po->bind_lock); 2106 if (dev->ifindex == po->ifindex) {
1965 if (dev->ifindex == po->ifindex && po->num && 2107 spin_lock(&po->bind_lock);
1966 !po->running) { 2108 if (po->num && !po->running) {
1967 dev_add_pack(&po->prot_hook); 2109 dev_add_pack(&po->prot_hook);
1968 sock_hold(sk); 2110 sock_hold(sk);
1969 po->running = 1; 2111 po->running = 1;
2112 }
2113 spin_unlock(&po->bind_lock);
1970 } 2114 }
1971 spin_unlock(&po->bind_lock);
1972 break; 2115 break;
1973 } 2116 }
1974 } 2117 }
1975 read_unlock(&net->packet.sklist_lock); 2118 rcu_read_unlock();
1976 return NOTIFY_DONE; 2119 return NOTIFY_DONE;
1977} 2120}
1978 2121
@@ -2032,11 +2175,6 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
2032 return 0; 2175 return 0;
2033} 2176}
2034 2177
2035#ifndef CONFIG_PACKET_MMAP
2036#define packet_mmap sock_no_mmap
2037#define packet_poll datagram_poll
2038#else
2039
2040static unsigned int packet_poll(struct file *file, struct socket *sock, 2178static unsigned int packet_poll(struct file *file, struct socket *sock,
2041 poll_table *wait) 2179 poll_table *wait)
2042{ 2180{
@@ -2318,8 +2456,6 @@ out:
2318 mutex_unlock(&po->pg_vec_lock); 2456 mutex_unlock(&po->pg_vec_lock);
2319 return err; 2457 return err;
2320} 2458}
2321#endif
2322
2323 2459
2324static const struct proto_ops packet_ops_spkt = { 2460static const struct proto_ops packet_ops_spkt = {
2325 .family = PF_PACKET, 2461 .family = PF_PACKET,
@@ -2374,40 +2510,26 @@ static struct notifier_block packet_netdev_notifier = {
2374}; 2510};
2375 2511
2376#ifdef CONFIG_PROC_FS 2512#ifdef CONFIG_PROC_FS
2377static inline struct sock *packet_seq_idx(struct net *net, loff_t off)
2378{
2379 struct sock *s;
2380 struct hlist_node *node;
2381
2382 sk_for_each(s, node, &net->packet.sklist) {
2383 if (!off--)
2384 return s;
2385 }
2386 return NULL;
2387}
2388 2513
2389static void *packet_seq_start(struct seq_file *seq, loff_t *pos) 2514static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
2390 __acquires(seq_file_net(seq)->packet.sklist_lock) 2515 __acquires(RCU)
2391{ 2516{
2392 struct net *net = seq_file_net(seq); 2517 struct net *net = seq_file_net(seq);
2393 read_lock(&net->packet.sklist_lock); 2518
2394 return *pos ? packet_seq_idx(net, *pos - 1) : SEQ_START_TOKEN; 2519 rcu_read_lock();
2520 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
2395} 2521}
2396 2522
2397static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2523static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2398{ 2524{
2399 struct net *net = seq_file_net(seq); 2525 struct net *net = seq_file_net(seq);
2400 ++*pos; 2526 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
2401 return (v == SEQ_START_TOKEN)
2402 ? sk_head(&net->packet.sklist)
2403 : sk_next((struct sock *)v) ;
2404} 2527}
2405 2528
2406static void packet_seq_stop(struct seq_file *seq, void *v) 2529static void packet_seq_stop(struct seq_file *seq, void *v)
2407 __releases(seq_file_net(seq)->packet.sklist_lock) 2530 __releases(RCU)
2408{ 2531{
2409 struct net *net = seq_file_net(seq); 2532 rcu_read_unlock();
2410 read_unlock(&net->packet.sklist_lock);
2411} 2533}
2412 2534
2413static int packet_seq_show(struct seq_file *seq, void *v) 2535static int packet_seq_show(struct seq_file *seq, void *v)
@@ -2415,7 +2537,7 @@ static int packet_seq_show(struct seq_file *seq, void *v)
2415 if (v == SEQ_START_TOKEN) 2537 if (v == SEQ_START_TOKEN)
2416 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n"); 2538 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
2417 else { 2539 else {
2418 struct sock *s = v; 2540 struct sock *s = sk_entry(v);
2419 const struct packet_sock *po = pkt_sk(s); 2541 const struct packet_sock *po = pkt_sk(s);
2420 2542
2421 seq_printf(seq, 2543 seq_printf(seq,
@@ -2457,9 +2579,9 @@ static const struct file_operations packet_seq_fops = {
2457 2579
2458#endif 2580#endif
2459 2581
2460static int packet_net_init(struct net *net) 2582static int __net_init packet_net_init(struct net *net)
2461{ 2583{
2462 rwlock_init(&net->packet.sklist_lock); 2584 spin_lock_init(&net->packet.sklist_lock);
2463 INIT_HLIST_HEAD(&net->packet.sklist); 2585 INIT_HLIST_HEAD(&net->packet.sklist);
2464 2586
2465 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops)) 2587 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
@@ -2468,7 +2590,7 @@ static int packet_net_init(struct net *net)
2468 return 0; 2590 return 0;
2469} 2591}
2470 2592
2471static void packet_net_exit(struct net *net) 2593static void __net_exit packet_net_exit(struct net *net)
2472{ 2594{
2473 proc_net_remove(net, "packet"); 2595 proc_net_remove(net, "packet");
2474} 2596}
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index 67f072e94d0..387197b579b 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -75,7 +75,8 @@ static int pn_sendmsg(struct kiocb *iocb, struct sock *sk,
75 struct sk_buff *skb; 75 struct sk_buff *skb;
76 int err; 76 int err;
77 77
78 if (msg->msg_flags & MSG_OOB) 78 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
79 MSG_CMSG_COMPAT))
79 return -EOPNOTSUPP; 80 return -EOPNOTSUPP;
80 81
81 if (msg->msg_name == NULL) 82 if (msg->msg_name == NULL)
@@ -119,7 +120,8 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
119 int rval = -EOPNOTSUPP; 120 int rval = -EOPNOTSUPP;
120 int copylen; 121 int copylen;
121 122
122 if (flags & MSG_OOB) 123 if (flags & ~(MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_NOSIGNAL|
124 MSG_CMSG_COMPAT))
123 goto out_nofree; 125 goto out_nofree;
124 126
125 if (addr_len) 127 if (addr_len)
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index d183509d3fa..d01208968c8 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -96,11 +96,11 @@ static int gprs_recv(struct gprs_dev *gp, struct sk_buff *skb)
96 goto drop; 96 goto drop;
97 } 97 }
98 98
99 if (likely(skb_headroom(skb) & 3)) { 99 if (skb_headroom(skb) & 3) {
100 struct sk_buff *rskb, *fs; 100 struct sk_buff *rskb, *fs;
101 int flen = 0; 101 int flen = 0;
102 102
103 /* Phonet Pipe data header is misaligned (3 bytes), 103 /* Phonet Pipe data header may be misaligned (3 bytes),
104 * so wrap the IP packet as a single fragment of an head-less 104 * so wrap the IP packet as a single fragment of an head-less
105 * socket buffer. The network stack will pull what it needs, 105 * socket buffer. The network stack will pull what it needs,
106 * but at least, the whole IP payload is not memcpy'd. */ 106 * but at least, the whole IP payload is not memcpy'd. */
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index b6356f3832f..360cf377693 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -354,6 +354,9 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
354 queue = &pn->ctrlreq_queue; 354 queue = &pn->ctrlreq_queue;
355 goto queue; 355 goto queue;
356 356
357 case PNS_PIPE_ALIGNED_DATA:
358 __skb_pull(skb, 1);
359 /* fall through */
357 case PNS_PIPE_DATA: 360 case PNS_PIPE_DATA:
358 __skb_pull(skb, 3); /* Pipe data header */ 361 __skb_pull(skb, 3); /* Pipe data header */
359 if (!pn_flow_safe(pn->rx_fc)) { 362 if (!pn_flow_safe(pn->rx_fc)) {
@@ -441,6 +444,7 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
441 struct sockaddr_pn dst; 444 struct sockaddr_pn dst;
442 u16 peer_type; 445 u16 peer_type;
443 u8 pipe_handle, enabled, n_sb; 446 u8 pipe_handle, enabled, n_sb;
447 u8 aligned = 0;
444 448
445 if (!pskb_pull(skb, sizeof(*hdr) + 4)) 449 if (!pskb_pull(skb, sizeof(*hdr) + 4))
446 return -EINVAL; 450 return -EINVAL;
@@ -479,6 +483,9 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
479 return -EINVAL; 483 return -EINVAL;
480 peer_type = (peer_type & 0xff00) | data[0]; 484 peer_type = (peer_type & 0xff00) | data[0];
481 break; 485 break;
486 case PN_PIPE_SB_ALIGNED_DATA:
487 aligned = data[0] != 0;
488 break;
482 } 489 }
483 n_sb--; 490 n_sb--;
484 } 491 }
@@ -510,6 +517,7 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
510 newpn->rx_credits = 0; 517 newpn->rx_credits = 0;
511 newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL; 518 newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
512 newpn->init_enable = enabled; 519 newpn->init_enable = enabled;
520 newpn->aligned = aligned;
513 521
514 BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue)); 522 BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue));
515 skb_queue_head(&newsk->sk_receive_queue, skb); 523 skb_queue_head(&newsk->sk_receive_queue, skb);
@@ -829,11 +837,15 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
829 return -ENOBUFS; 837 return -ENOBUFS;
830 } 838 }
831 839
832 skb_push(skb, 3); 840 skb_push(skb, 3 + pn->aligned);
833 skb_reset_transport_header(skb); 841 skb_reset_transport_header(skb);
834 ph = pnp_hdr(skb); 842 ph = pnp_hdr(skb);
835 ph->utid = 0; 843 ph->utid = 0;
836 ph->message_id = PNS_PIPE_DATA; 844 if (pn->aligned) {
845 ph->message_id = PNS_PIPE_ALIGNED_DATA;
846 ph->data[0] = 0; /* padding */
847 } else
848 ph->message_id = PNS_PIPE_DATA;
837 ph->pipe_handle = pn->pipe_handle; 849 ph->pipe_handle = pn->pipe_handle;
838 850
839 return pn_skb_send(sk, skb, &pipe_srv); 851 return pn_skb_send(sk, skb, &pipe_srv);
@@ -848,7 +860,9 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
848 int flags = msg->msg_flags; 860 int flags = msg->msg_flags;
849 int err, done; 861 int err, done;
850 862
851 if (msg->msg_flags & MSG_OOB || !(msg->msg_flags & MSG_EOR)) 863 if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
864 MSG_CMSG_COMPAT)) ||
865 !(msg->msg_flags & MSG_EOR))
852 return -EOPNOTSUPP; 866 return -EOPNOTSUPP;
853 867
854 skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len, 868 skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
@@ -927,6 +941,9 @@ int pep_write(struct sock *sk, struct sk_buff *skb)
927 struct sk_buff *rskb, *fs; 941 struct sk_buff *rskb, *fs;
928 int flen = 0; 942 int flen = 0;
929 943
944 if (pep_sk(sk)->aligned)
945 return pipe_skb_send(sk, skb);
946
930 rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC); 947 rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
931 if (!rskb) { 948 if (!rskb) {
932 kfree_skb(skb); 949 kfree_skb(skb);
@@ -966,6 +983,10 @@ static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
966 struct sk_buff *skb; 983 struct sk_buff *skb;
967 int err; 984 int err;
968 985
986 if (flags & ~(MSG_OOB|MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_WAITALL|
987 MSG_NOSIGNAL|MSG_CMSG_COMPAT))
988 return -EOPNOTSUPP;
989
969 if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE))) 990 if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE)))
970 return -ENOTCONN; 991 return -ENOTCONN;
971 992
@@ -973,6 +994,8 @@ static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
973 /* Dequeue and acknowledge control request */ 994 /* Dequeue and acknowledge control request */
974 struct pep_sock *pn = pep_sk(sk); 995 struct pep_sock *pn = pep_sk(sk);
975 996
997 if (flags & MSG_PEEK)
998 return -EOPNOTSUPP;
976 skb = skb_dequeue(&pn->ctrlreq_queue); 999 skb = skb_dequeue(&pn->ctrlreq_queue);
977 if (skb) { 1000 if (skb) {
978 pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR, 1001 pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR,
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index bc4a33bf2d3..c597cc53a6f 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -311,7 +311,7 @@ static struct notifier_block phonet_device_notifier = {
311}; 311};
312 312
313/* Per-namespace Phonet devices handling */ 313/* Per-namespace Phonet devices handling */
314static int phonet_init_net(struct net *net) 314static int __net_init phonet_init_net(struct net *net)
315{ 315{
316 struct phonet_net *pnn = net_generic(net, phonet_net_id); 316 struct phonet_net *pnn = net_generic(net, phonet_net_id);
317 317
@@ -324,7 +324,7 @@ static int phonet_init_net(struct net *net)
324 return 0; 324 return 0;
325} 325}
326 326
327static void phonet_exit_net(struct net *net) 327static void __net_exit phonet_exit_net(struct net *net)
328{ 328{
329 struct phonet_net *pnn = net_generic(net, phonet_net_id); 329 struct phonet_net *pnn = net_generic(net, phonet_net_id);
330 struct net_device *dev; 330 struct net_device *dev;
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 211522f9a9a..05625628598 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -90,8 +90,8 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
90 90
91 ret = sock->ops->bind(sock, (struct sockaddr *)&src, sizeof(src)); 91 ret = sock->ops->bind(sock, (struct sockaddr *)&src, sizeof(src));
92 if (ret) { 92 if (ret) {
93 rdsdebug("bind failed with %d at address %u.%u.%u.%u\n", 93 rdsdebug("bind failed with %d at address %pI4\n",
94 ret, NIPQUAD(conn->c_laddr)); 94 ret, &conn->c_laddr);
95 goto out; 95 goto out;
96 } 96 }
97 97
@@ -108,8 +108,7 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
108 O_NONBLOCK); 108 O_NONBLOCK);
109 sock = NULL; 109 sock = NULL;
110 110
111 rdsdebug("connect to address %u.%u.%u.%u returned %d\n", 111 rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret);
112 NIPQUAD(conn->c_faddr), ret);
113 if (ret == -EINPROGRESS) 112 if (ret == -EINPROGRESS)
114 ret = 0; 113 ret = 0;
115 114
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 45474a43686..53cb1b54165 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -66,9 +66,9 @@ static int rds_tcp_accept_one(struct socket *sock)
66 66
67 inet = inet_sk(new_sock->sk); 67 inet = inet_sk(new_sock->sk);
68 68
69 rdsdebug("accepted tcp %u.%u.%u.%u:%u -> %u.%u.%u.%u:%u\n", 69 rdsdebug("accepted tcp %pI4:%u -> %pI4:%u\n",
70 NIPQUAD(inet->inet_saddr), ntohs(inet->inet_sport), 70 &inet->inet_saddr, ntohs(inet->inet_sport),
71 NIPQUAD(inet->inet_daddr), ntohs(inet->inet_dport)); 71 &inet->inet_daddr, ntohs(inet->inet_dport));
72 72
73 conn = rds_conn_create(inet->inet_saddr, inet->inet_daddr, 73 conn = rds_conn_create(inet->inet_saddr, inet->inet_daddr,
74 &rds_tcp_transport, GFP_KERNEL); 74 &rds_tcp_transport, GFP_KERNEL);
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index ab545e0cd5d..34fdcc059e5 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -193,9 +193,9 @@ out:
193 rds_tcp_stats_inc(s_tcp_sndbuf_full); 193 rds_tcp_stats_inc(s_tcp_sndbuf_full);
194 ret = 0; 194 ret = 0;
195 } else { 195 } else {
196 printk(KERN_WARNING "RDS/tcp: send to %u.%u.%u.%u " 196 printk(KERN_WARNING "RDS/tcp: send to %pI4 "
197 "returned %d, disconnecting and reconnecting\n", 197 "returned %d, disconnecting and reconnecting\n",
198 NIPQUAD(conn->c_faddr), ret); 198 &conn->c_faddr, ret);
199 rds_conn_drop(conn); 199 rds_conn_drop(conn);
200 } 200 }
201 } 201 }
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 8feb9e5d662..e90b9b6c16a 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1404,29 +1404,13 @@ static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1404static void *rose_info_start(struct seq_file *seq, loff_t *pos) 1404static void *rose_info_start(struct seq_file *seq, loff_t *pos)
1405 __acquires(rose_list_lock) 1405 __acquires(rose_list_lock)
1406{ 1406{
1407 int i;
1408 struct sock *s;
1409 struct hlist_node *node;
1410
1411 spin_lock_bh(&rose_list_lock); 1407 spin_lock_bh(&rose_list_lock);
1412 if (*pos == 0) 1408 return seq_hlist_start_head(&rose_list, *pos);
1413 return SEQ_START_TOKEN;
1414
1415 i = 1;
1416 sk_for_each(s, node, &rose_list) {
1417 if (i == *pos)
1418 return s;
1419 ++i;
1420 }
1421 return NULL;
1422} 1409}
1423 1410
1424static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos) 1411static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
1425{ 1412{
1426 ++*pos; 1413 return seq_hlist_next(v, &rose_list, pos);
1427
1428 return (v == SEQ_START_TOKEN) ? sk_head(&rose_list)
1429 : sk_next((struct sock *)v);
1430} 1414}
1431 1415
1432static void rose_info_stop(struct seq_file *seq, void *v) 1416static void rose_info_stop(struct seq_file *seq, void *v)
@@ -1444,7 +1428,7 @@ static int rose_info_show(struct seq_file *seq, void *v)
1444 "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n"); 1428 "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");
1445 1429
1446 else { 1430 else {
1447 struct sock *s = v; 1431 struct sock *s = sk_entry(v);
1448 struct rose_sock *rose = rose_sk(s); 1432 struct rose_sock *rose = rose_sk(s);
1449 const char *devname, *callsign; 1433 const char *devname, *callsign;
1450 const struct net_device *dev = rose->device; 1434 const struct net_device *dev = rose->device;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 75fd1c672c6..6cd491013b5 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1707,6 +1707,7 @@ static int __init pktsched_init(void)
1707{ 1707{
1708 register_qdisc(&pfifo_qdisc_ops); 1708 register_qdisc(&pfifo_qdisc_ops);
1709 register_qdisc(&bfifo_qdisc_ops); 1709 register_qdisc(&bfifo_qdisc_ops);
1710 register_qdisc(&pfifo_head_drop_qdisc_ops);
1710 register_qdisc(&mq_qdisc_ops); 1711 register_qdisc(&mq_qdisc_ops);
1711 proc_net_fops_create(&init_net, "psched", 0, &psched_fops); 1712 proc_net_fops_create(&init_net, "psched", 0, &psched_fops);
1712 1713
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 69188e8358b..4b0a6cc44c7 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -43,6 +43,26 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
43 return qdisc_reshape_fail(skb, sch); 43 return qdisc_reshape_fail(skb, sch);
44} 44}
45 45
46static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
47{
48 struct sk_buff *skb_head;
49 struct fifo_sched_data *q = qdisc_priv(sch);
50
51 if (likely(skb_queue_len(&sch->q) < q->limit))
52 return qdisc_enqueue_tail(skb, sch);
53
54 /* queue full, remove one skb to fulfill the limit */
55 skb_head = qdisc_dequeue_head(sch);
56 sch->bstats.bytes -= qdisc_pkt_len(skb_head);
57 sch->bstats.packets--;
58 sch->qstats.drops++;
59 kfree_skb(skb_head);
60
61 qdisc_enqueue_tail(skb, sch);
62
63 return NET_XMIT_CN;
64}
65
46static int fifo_init(struct Qdisc *sch, struct nlattr *opt) 66static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
47{ 67{
48 struct fifo_sched_data *q = qdisc_priv(sch); 68 struct fifo_sched_data *q = qdisc_priv(sch);
@@ -108,6 +128,20 @@ struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
108}; 128};
109EXPORT_SYMBOL(bfifo_qdisc_ops); 129EXPORT_SYMBOL(bfifo_qdisc_ops);
110 130
131struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
132 .id = "pfifo_head_drop",
133 .priv_size = sizeof(struct fifo_sched_data),
134 .enqueue = pfifo_tail_enqueue,
135 .dequeue = qdisc_dequeue_head,
136 .peek = qdisc_peek_head,
137 .drop = qdisc_queue_drop_head,
138 .init = fifo_init,
139 .reset = qdisc_reset_queue,
140 .change = fifo_init,
141 .dump = fifo_dump,
142 .owner = THIS_MODULE,
143};
144
111/* Pass size change message down to embedded FIFO */ 145/* Pass size change message down to embedded FIFO */
112int fifo_set_limit(struct Qdisc *q, unsigned int limit) 146int fifo_set_limit(struct Qdisc *q, unsigned int limit)
113{ 147{
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 13a6fba4107..bef13373168 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -186,7 +186,6 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
186 addr->valid = 1; 186 addr->valid = 1;
187 187
188 INIT_LIST_HEAD(&addr->list); 188 INIT_LIST_HEAD(&addr->list);
189 INIT_RCU_HEAD(&addr->rcu);
190 189
191 /* We always hold a socket lock when calling this function, 190 /* We always hold a socket lock when calling this function,
192 * and that acts as a writer synchronizing lock. 191 * and that acts as a writer synchronizing lock.
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index cc50fbe9929..1d7ac70ba39 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -381,7 +381,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
381 addr->a.v6.sin6_scope_id = dev->ifindex; 381 addr->a.v6.sin6_scope_id = dev->ifindex;
382 addr->valid = 1; 382 addr->valid = 1;
383 INIT_LIST_HEAD(&addr->list); 383 INIT_LIST_HEAD(&addr->list);
384 INIT_RCU_HEAD(&addr->rcu);
385 list_add_tail(&addr->list, addrlist); 384 list_add_tail(&addr->list, addrlist);
386 } 385 }
387 } 386 }
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index d093cbfeaac..784bcc9a979 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -40,7 +40,7 @@
40#include <net/sctp/sctp.h> 40#include <net/sctp/sctp.h>
41#include <net/ip.h> /* for snmp_fold_field */ 41#include <net/ip.h> /* for snmp_fold_field */
42 42
43static struct snmp_mib sctp_snmp_list[] = { 43static const struct snmp_mib sctp_snmp_list[] = {
44 SNMP_MIB_ITEM("SctpCurrEstab", SCTP_MIB_CURRESTAB), 44 SNMP_MIB_ITEM("SctpCurrEstab", SCTP_MIB_CURRESTAB),
45 SNMP_MIB_ITEM("SctpActiveEstabs", SCTP_MIB_ACTIVEESTABS), 45 SNMP_MIB_ITEM("SctpActiveEstabs", SCTP_MIB_ACTIVEESTABS),
46 SNMP_MIB_ITEM("SctpPassiveEstabs", SCTP_MIB_PASSIVEESTABS), 46 SNMP_MIB_ITEM("SctpPassiveEstabs", SCTP_MIB_PASSIVEESTABS),
@@ -83,7 +83,7 @@ static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
83 83
84 for (i = 0; sctp_snmp_list[i].name != NULL; i++) 84 for (i = 0; sctp_snmp_list[i].name != NULL; i++)
85 seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name, 85 seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
86 snmp_fold_field((void **)sctp_statistics, 86 snmp_fold_field((void __percpu **)sctp_statistics,
87 sctp_snmp_list[i].entry)); 87 sctp_snmp_list[i].entry));
88 88
89 return 0; 89 return 0;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index a3c8988758b..e771690f6d5 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -188,7 +188,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
188 addr->a.v4.sin_addr.s_addr = ifa->ifa_local; 188 addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
189 addr->valid = 1; 189 addr->valid = 1;
190 INIT_LIST_HEAD(&addr->list); 190 INIT_LIST_HEAD(&addr->list);
191 INIT_RCU_HEAD(&addr->rcu);
192 list_add_tail(&addr->list, addrlist); 191 list_add_tail(&addr->list, addrlist);
193 } 192 }
194 } 193 }
@@ -996,12 +995,13 @@ int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
996 995
997static inline int init_sctp_mibs(void) 996static inline int init_sctp_mibs(void)
998{ 997{
999 return snmp_mib_init((void**)sctp_statistics, sizeof(struct sctp_mib)); 998 return snmp_mib_init((void __percpu **)sctp_statistics,
999 sizeof(struct sctp_mib));
1000} 1000}
1001 1001
1002static inline void cleanup_sctp_mibs(void) 1002static inline void cleanup_sctp_mibs(void)
1003{ 1003{
1004 snmp_mib_free((void**)sctp_statistics); 1004 snmp_mib_free((void __percpu **)sctp_statistics);
1005} 1005}
1006 1006
1007static void sctp_v4_pf_init(void) 1007static void sctp_v4_pf_init(void)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 67fdac9d2d3..f6d1e59c415 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -6359,7 +6359,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
6359 struct sctp_association *asoc) 6359 struct sctp_association *asoc)
6360{ 6360{
6361 struct inet_sock *inet = inet_sk(sk); 6361 struct inet_sock *inet = inet_sk(sk);
6362 struct inet_sock *newinet = inet_sk(newsk); 6362 struct inet_sock *newinet;
6363 6363
6364 newsk->sk_type = sk->sk_type; 6364 newsk->sk_type = sk->sk_type;
6365 newsk->sk_bound_dev_if = sk->sk_bound_dev_if; 6365 newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 49278f83036..9ea45383480 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -78,7 +78,7 @@ rpc_timeout_upcall_queue(struct work_struct *work)
78} 78}
79 79
80/** 80/**
81 * rpc_queue_upcall 81 * rpc_queue_upcall - queue an upcall message to userspace
82 * @inode: inode of upcall pipe on which to queue given message 82 * @inode: inode of upcall pipe on which to queue given message
83 * @msg: message to queue 83 * @msg: message to queue
84 * 84 *
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 0b15d7250c4..53196009160 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -71,7 +71,7 @@ static struct ctl_table_root net_sysctl_ro_root = {
71 .permissions = net_ctl_ro_header_perms, 71 .permissions = net_ctl_ro_header_perms,
72}; 72};
73 73
74static int sysctl_net_init(struct net *net) 74static int __net_init sysctl_net_init(struct net *net)
75{ 75{
76 setup_sysctl_set(&net->sysctls, 76 setup_sysctl_set(&net->sysctls,
77 &net_sysctl_ro_root.default_set, 77 &net_sysctl_ro_root.default_set,
@@ -79,7 +79,7 @@ static int sysctl_net_init(struct net *net)
79 return 0; 79 return 0;
80} 80}
81 81
82static void sysctl_net_exit(struct net *net) 82static void __net_exit sysctl_net_exit(struct net *net)
83{ 83{
84 WARN_ON(!list_empty(&net->sysctls.list)); 84 WARN_ON(!list_empty(&net->sysctls.list));
85 return; 85 return;
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index 3b30d1130b6..b74f78d0c03 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -10,7 +10,7 @@ menuconfig TIPC
10 specially designed for intra cluster communication. This protocol 10 specially designed for intra cluster communication. This protocol
11 originates from Ericsson where it has been used in carrier grade 11 originates from Ericsson where it has been used in carrier grade
12 cluster applications for many years. 12 cluster applications for many years.
13 13
14 For more information about TIPC, see http://tipc.sourceforge.net. 14 For more information about TIPC, see http://tipc.sourceforge.net.
15 15
16 This protocol support is also available as a module ( = code which 16 This protocol support is also available as a module ( = code which
@@ -23,91 +23,76 @@ menuconfig TIPC
23if TIPC 23if TIPC
24 24
25config TIPC_ADVANCED 25config TIPC_ADVANCED
26 bool "TIPC: Advanced configuration" 26 bool "Advanced TIPC configuration"
27 default n 27 default n
28 help 28 help
29 Saying Y here will open some advanced configuration 29 Saying Y here will open some advanced configuration for TIPC.
30 for TIPC. Most users do not need to bother, so if 30 Most users do not need to bother; if unsure, just say N.
31 unsure, just say N.
32 31
33config TIPC_ZONES 32config TIPC_ZONES
34 int "Maximum number of zones in network" 33 int "Maximum number of zones in a network"
35 depends on TIPC_ADVANCED 34 depends on TIPC_ADVANCED
35 range 1 255
36 default "3" 36 default "3"
37 help 37 help
38 Max number of zones inside TIPC network. Max supported value 38 Specifies how many zones can be supported in a TIPC network.
39 is 255 zones, minimum is 1 39 Can range from 1 to 255 zones; default is 3.
40 40
41 Default is 3 zones in a network; setting this to higher 41 Setting this to a smaller value saves some memory;
42 allows more zones but might use more memory. 42 setting it to a higher value allows for more zones.
43 43
44config TIPC_CLUSTERS 44config TIPC_CLUSTERS
45 int "Maximum number of clusters in a zone" 45 int "Maximum number of clusters in a zone"
46 depends on TIPC_ADVANCED 46 depends on TIPC_ADVANCED
47 range 1 1
47 default "1" 48 default "1"
48 help 49 help
49 ***Only 1 (one cluster in a zone) is supported by current code. 50 Specifies how many clusters can be supported in a TIPC zone.
50 Any value set here will be overridden.***
51
52 (Max number of clusters inside TIPC zone. Max supported
53 value is 4095 clusters, minimum is 1.
54 51
55 Default is 1; setting this to smaller value might save 52 *** Currently TIPC only supports a single cluster per zone. ***
56 some memory, setting it to higher
57 allows more clusters and might consume more memory.)
58 53
59config TIPC_NODES 54config TIPC_NODES
60 int "Maximum number of nodes in cluster" 55 int "Maximum number of nodes in a cluster"
61 depends on TIPC_ADVANCED 56 depends on TIPC_ADVANCED
57 range 8 2047
62 default "255" 58 default "255"
63 help 59 help
64 Maximum number of nodes inside a TIPC cluster. Maximum 60 Specifies how many nodes can be supported in a TIPC cluster.
65 supported value is 2047 nodes, minimum is 8. 61 Can range from 8 to 2047 nodes; default is 255.
66
67 Setting this to a smaller value saves some memory,
68 setting it to higher allows more nodes.
69
70config TIPC_SLAVE_NODES
71 int "Maximum number of slave nodes in cluster"
72 depends on TIPC_ADVANCED
73 default "0"
74 help
75 ***This capability is not supported by current code.***
76
77 Maximum number of slave nodes inside a TIPC cluster. Maximum
78 supported value is 2047 nodes, minimum is 0.
79 62
80 Setting this to a smaller value saves some memory, 63 Setting this to a smaller value saves some memory;
81 setting it to higher allows more nodes. 64 setting it to higher allows for more nodes.
82 65
83config TIPC_PORTS 66config TIPC_PORTS
84 int "Maximum number of ports in a node" 67 int "Maximum number of ports in a node"
85 depends on TIPC_ADVANCED 68 depends on TIPC_ADVANCED
69 range 127 65535
86 default "8191" 70 default "8191"
87 help 71 help
88 Maximum number of ports within a node. Maximum 72 Specifies how many ports can be supported by a node.
89 supported value is 64535 nodes, minimum is 127. 73 Can range from 127 to 65535 ports; default is 8191.
90 74
91 Setting this to a smaller value saves some memory, 75 Setting this to a smaller value saves some memory,
92 setting it to higher allows more ports. 76 setting it to higher allows for more ports.
93 77
94config TIPC_LOG 78config TIPC_LOG
95 int "Size of log buffer" 79 int "Size of log buffer"
96 depends on TIPC_ADVANCED 80 depends on TIPC_ADVANCED
97 default 0 81 range 0 32768
82 default "0"
98 help 83 help
99 Size (in bytes) of TIPC's internal log buffer, which records the 84 Size (in bytes) of TIPC's internal log buffer, which records the
100 occurrence of significant events. Maximum supported value 85 occurrence of significant events. Can range from 0 to 32768 bytes;
101 is 32768 bytes, minimum is 0. 86 default is 0.
102 87
103 There is no need to enable the log buffer unless the node will be 88 There is no need to enable the log buffer unless the node will be
104 managed remotely via TIPC. 89 managed remotely via TIPC.
105 90
106config TIPC_DEBUG 91config TIPC_DEBUG
107 bool "Enable debugging support" 92 bool "Enable debug messages"
108 default n 93 default n
109 help 94 help
110 This will enable debugging of TIPC. 95 This enables debugging of TIPC.
111 96
112 Only say Y here if you are having trouble with TIPC. It will 97 Only say Y here if you are having trouble with TIPC. It will
113 enable the display of detailed information about what is going on. 98 enable the display of detailed information about what is going on.
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 3256bd7d398..52c571fedbe 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -189,11 +189,11 @@ static int __init tipc_init(void)
189 tipc_remote_management = 1; 189 tipc_remote_management = 1;
190 tipc_max_publications = 10000; 190 tipc_max_publications = 10000;
191 tipc_max_subscriptions = 2000; 191 tipc_max_subscriptions = 2000;
192 tipc_max_ports = delimit(CONFIG_TIPC_PORTS, 127, 65536); 192 tipc_max_ports = CONFIG_TIPC_PORTS;
193 tipc_max_zones = delimit(CONFIG_TIPC_ZONES, 1, 255); 193 tipc_max_zones = CONFIG_TIPC_ZONES;
194 tipc_max_clusters = delimit(CONFIG_TIPC_CLUSTERS, 1, 1); 194 tipc_max_clusters = CONFIG_TIPC_CLUSTERS;
195 tipc_max_nodes = delimit(CONFIG_TIPC_NODES, 8, 2047); 195 tipc_max_nodes = CONFIG_TIPC_NODES;
196 tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047); 196 tipc_max_slaves = CONFIG_TIPC_SLAVE_NODES;
197 tipc_net_id = 4711; 197 tipc_net_id = 4711;
198 198
199 if ((res = tipc_core_start())) 199 if ((res = tipc_core_start()))
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index f2551190311..3d9122e78f4 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -144,7 +144,7 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
144/* 144/*
145 * SMP locking strategy: 145 * SMP locking strategy:
146 * hash table is protected with spinlock unix_table_lock 146 * hash table is protected with spinlock unix_table_lock
147 * each socket state is protected by separate rwlock. 147 * each socket state is protected by separate spin lock.
148 */ 148 */
149 149
150static inline unsigned unix_hash_fold(__wsum n) 150static inline unsigned unix_hash_fold(__wsum n)
@@ -2224,7 +2224,7 @@ static const struct net_proto_family unix_family_ops = {
2224}; 2224};
2225 2225
2226 2226
2227static int unix_net_init(struct net *net) 2227static int __net_init unix_net_init(struct net *net)
2228{ 2228{
2229 int error = -ENOMEM; 2229 int error = -ENOMEM;
2230 2230
@@ -2243,7 +2243,7 @@ out:
2243 return error; 2243 return error;
2244} 2244}
2245 2245
2246static void unix_net_exit(struct net *net) 2246static void __net_exit unix_net_exit(struct net *net)
2247{ 2247{
2248 unix_sysctl_unregister(net); 2248 unix_sysctl_unregister(net);
2249 proc_net_remove(net, "unix"); 2249 proc_net_remove(net, "unix");
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index 708f5df6b7f..d095c7be10d 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -31,7 +31,7 @@ static struct ctl_path unix_path[] = {
31 { }, 31 { },
32}; 32};
33 33
34int unix_sysctl_register(struct net *net) 34int __net_init unix_sysctl_register(struct net *net)
35{ 35{
36 struct ctl_table *table; 36 struct ctl_table *table;
37 37
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
index d3bfb6ef13a..7718657e93d 100644
--- a/net/wimax/op-msg.c
+++ b/net/wimax/op-msg.c
@@ -320,8 +320,7 @@ int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name,
320EXPORT_SYMBOL_GPL(wimax_msg); 320EXPORT_SYMBOL_GPL(wimax_msg);
321 321
322 322
323static const 323static const struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = {
324struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = {
325 [WIMAX_GNL_MSG_IFIDX] = { 324 [WIMAX_GNL_MSG_IFIDX] = {
326 .type = NLA_U32, 325 .type = NLA_U32,
327 }, 326 },
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c
index 35f370091f4..4dc82a54ba3 100644
--- a/net/wimax/op-reset.c
+++ b/net/wimax/op-reset.c
@@ -91,8 +91,7 @@ int wimax_reset(struct wimax_dev *wimax_dev)
91EXPORT_SYMBOL(wimax_reset); 91EXPORT_SYMBOL(wimax_reset);
92 92
93 93
94static const 94static const struct nla_policy wimax_gnl_reset_policy[WIMAX_GNL_ATTR_MAX + 1] = {
95struct nla_policy wimax_gnl_reset_policy[WIMAX_GNL_ATTR_MAX + 1] = {
96 [WIMAX_GNL_RESET_IFIDX] = { 95 [WIMAX_GNL_RESET_IFIDX] = {
97 .type = NLA_U32, 96 .type = NLA_U32,
98 }, 97 },
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index ae752a64d92..e978c7136c9 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -410,8 +410,7 @@ void wimax_rfkill_rm(struct wimax_dev *wimax_dev)
410 * just query). 410 * just query).
411 */ 411 */
412 412
413static const 413static const struct nla_policy wimax_gnl_rfkill_policy[WIMAX_GNL_ATTR_MAX + 1] = {
414struct nla_policy wimax_gnl_rfkill_policy[WIMAX_GNL_ATTR_MAX + 1] = {
415 [WIMAX_GNL_RFKILL_IFIDX] = { 414 [WIMAX_GNL_RFKILL_IFIDX] = {
416 .type = NLA_U32, 415 .type = NLA_U32,
417 }, 416 },
diff --git a/net/wimax/op-state-get.c b/net/wimax/op-state-get.c
index a76b8fcb056..11ad3356eb5 100644
--- a/net/wimax/op-state-get.c
+++ b/net/wimax/op-state-get.c
@@ -33,8 +33,7 @@
33#include "debug-levels.h" 33#include "debug-levels.h"
34 34
35 35
36static const 36static const struct nla_policy wimax_gnl_state_get_policy[WIMAX_GNL_ATTR_MAX + 1] = {
37struct nla_policy wimax_gnl_state_get_policy[WIMAX_GNL_ATTR_MAX + 1] = {
38 [WIMAX_GNL_STGET_IFIDX] = { 37 [WIMAX_GNL_STGET_IFIDX] = {
39 .type = NLA_U32, 38 .type = NLA_U32,
40 }, 39 },
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index c8866412f83..813e1eaea29 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -75,8 +75,7 @@ MODULE_PARM_DESC(debug,
75 * close to where the data is generated. 75 * close to where the data is generated.
76 */ 76 */
77/* 77/*
78static const 78static const struct nla_policy wimax_gnl_re_status_change[WIMAX_GNL_ATTR_MAX + 1] = {
79struct nla_policy wimax_gnl_re_status_change[WIMAX_GNL_ATTR_MAX + 1] = {
80 [WIMAX_GNL_STCH_STATE_OLD] = { .type = NLA_U8 }, 79 [WIMAX_GNL_STCH_STATE_OLD] = { .type = NLA_U8 },
81 [WIMAX_GNL_STCH_STATE_NEW] = { .type = NLA_U8 }, 80 [WIMAX_GNL_STCH_STATE_NEW] = { .type = NLA_U8 },
82}; 81};
diff --git a/net/wireless/.gitignore b/net/wireless/.gitignore
new file mode 100644
index 00000000000..c33451b896d
--- /dev/null
+++ b/net/wireless/.gitignore
@@ -0,0 +1 @@
regdb.c
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 90e93a5701a..d0ee29063e5 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -94,20 +94,21 @@ config CFG80211_DEBUGFS
94 94
95 If unsure, say N. 95 If unsure, say N.
96 96
97config WIRELESS_OLD_REGULATORY 97config CFG80211_INTERNAL_REGDB
98 bool "Old wireless static regulatory definitions" 98 bool "use statically compiled regulatory rules database" if EMBEDDED
99 default n 99 default n
100 depends on CFG80211 100 depends on CFG80211
101 ---help--- 101 ---help---
102 This option enables the old static regulatory information 102 This option generates an internal data structure representing
103 and uses it within the new framework. This option is available 103 the wireless regulatory rules described in net/wireless/db.txt
104 for historical reasons and it is advised to leave it off. 104 and includes code to query that database. This is an alternative
105 to using CRDA for defining regulatory rules for the kernel.
105 106
106 For details see: 107 For details see:
107 108
108 http://wireless.kernel.org/en/developers/Regulatory 109 http://wireless.kernel.org/en/developers/Regulatory
109 110
110 Say N and if you say Y, please tell us why. The default is N. 111 Most distributions have a CRDA package. So if unsure, say N.
111 112
112config CFG80211_WEXT 113config CFG80211_WEXT
113 bool "cfg80211 wireless extensions compatibility" 114 bool "cfg80211 wireless extensions compatibility"
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index f07c8dc7aab..e77e508126f 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -13,5 +13,11 @@ cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o
13cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o 13cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o
14cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o 14cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
15cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o 15cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o
16cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o
16 17
17ccflags-y += -D__CHECK_ENDIAN__ 18ccflags-y += -D__CHECK_ENDIAN__
19
20$(obj)/regdb.c: $(src)/db.txt $(src)/genregdb.awk
21 @$(AWK) -f $(srctree)/$(src)/genregdb.awk < $< > $@
22
23clean-files := regdb.c
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index a46ac6c9b36..bf1737fc9a7 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -41,44 +41,57 @@ rdev_fixed_channel(struct cfg80211_registered_device *rdev,
41 return result; 41 return result;
42} 42}
43 43
44int rdev_set_freq(struct cfg80211_registered_device *rdev, 44struct ieee80211_channel *
45 struct wireless_dev *for_wdev, 45rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
46 int freq, enum nl80211_channel_type channel_type) 46 int freq, enum nl80211_channel_type channel_type)
47{ 47{
48 struct ieee80211_channel *chan; 48 struct ieee80211_channel *chan;
49 struct ieee80211_sta_ht_cap *ht_cap; 49 struct ieee80211_sta_ht_cap *ht_cap;
50 int result;
51
52 if (rdev_fixed_channel(rdev, for_wdev))
53 return -EBUSY;
54
55 if (!rdev->ops->set_channel)
56 return -EOPNOTSUPP;
57 50
58 chan = ieee80211_get_channel(&rdev->wiphy, freq); 51 chan = ieee80211_get_channel(&rdev->wiphy, freq);
59 52
60 /* Primary channel not allowed */ 53 /* Primary channel not allowed */
61 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) 54 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
62 return -EINVAL; 55 return NULL;
63 56
64 if (channel_type == NL80211_CHAN_HT40MINUS && 57 if (channel_type == NL80211_CHAN_HT40MINUS &&
65 chan->flags & IEEE80211_CHAN_NO_HT40MINUS) 58 chan->flags & IEEE80211_CHAN_NO_HT40MINUS)
66 return -EINVAL; 59 return NULL;
67 else if (channel_type == NL80211_CHAN_HT40PLUS && 60 else if (channel_type == NL80211_CHAN_HT40PLUS &&
68 chan->flags & IEEE80211_CHAN_NO_HT40PLUS) 61 chan->flags & IEEE80211_CHAN_NO_HT40PLUS)
69 return -EINVAL; 62 return NULL;
70 63
71 ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap; 64 ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap;
72 65
73 if (channel_type != NL80211_CHAN_NO_HT) { 66 if (channel_type != NL80211_CHAN_NO_HT) {
74 if (!ht_cap->ht_supported) 67 if (!ht_cap->ht_supported)
75 return -EINVAL; 68 return NULL;
76 69
77 if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) || 70 if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
78 ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT) 71 ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)
79 return -EINVAL; 72 return NULL;
80 } 73 }
81 74
75 return chan;
76}
77
78int rdev_set_freq(struct cfg80211_registered_device *rdev,
79 struct wireless_dev *for_wdev,
80 int freq, enum nl80211_channel_type channel_type)
81{
82 struct ieee80211_channel *chan;
83 int result;
84
85 if (rdev_fixed_channel(rdev, for_wdev))
86 return -EBUSY;
87
88 if (!rdev->ops->set_channel)
89 return -EOPNOTSUPP;
90
91 chan = rdev_freq_to_chan(rdev, freq, channel_type);
92 if (!chan)
93 return -EINVAL;
94
82 result = rdev->ops->set_channel(&rdev->wiphy, chan, channel_type); 95 result = rdev->ops->set_channel(&rdev->wiphy, chan, channel_type);
83 if (result) 96 if (result)
84 return result; 97 return result;
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 92b81244248..71b6b3a9cf1 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * This is the linux wireless configuration interface. 2 * This is the linux wireless configuration interface.
3 * 3 *
4 * Copyright 2006-2009 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6 6
7#include <linux/if.h> 7#include <linux/if.h>
@@ -31,15 +31,10 @@ MODULE_AUTHOR("Johannes Berg");
31MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
32MODULE_DESCRIPTION("wireless configuration support"); 32MODULE_DESCRIPTION("wireless configuration support");
33 33
34/* RCU might be appropriate here since we usually 34/* RCU-protected (and cfg80211_mutex for writers) */
35 * only read the list, and that can happen quite
36 * often because we need to do it for each command */
37LIST_HEAD(cfg80211_rdev_list); 35LIST_HEAD(cfg80211_rdev_list);
38int cfg80211_rdev_list_generation; 36int cfg80211_rdev_list_generation;
39 37
40/*
41 * This is used to protect the cfg80211_rdev_list
42 */
43DEFINE_MUTEX(cfg80211_mutex); 38DEFINE_MUTEX(cfg80211_mutex);
44 39
45/* for debugfs */ 40/* for debugfs */
@@ -402,6 +397,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
402 rdev->wiphy.retry_long = 4; 397 rdev->wiphy.retry_long = 4;
403 rdev->wiphy.frag_threshold = (u32) -1; 398 rdev->wiphy.frag_threshold = (u32) -1;
404 rdev->wiphy.rts_threshold = (u32) -1; 399 rdev->wiphy.rts_threshold = (u32) -1;
400 rdev->wiphy.coverage_class = 0;
405 401
406 return &rdev->wiphy; 402 return &rdev->wiphy;
407} 403}
@@ -417,6 +413,18 @@ int wiphy_register(struct wiphy *wiphy)
417 int i; 413 int i;
418 u16 ifmodes = wiphy->interface_modes; 414 u16 ifmodes = wiphy->interface_modes;
419 415
416 if (WARN_ON(wiphy->addresses && !wiphy->n_addresses))
417 return -EINVAL;
418
419 if (WARN_ON(wiphy->addresses &&
420 !is_zero_ether_addr(wiphy->perm_addr) &&
421 memcmp(wiphy->perm_addr, wiphy->addresses[0].addr,
422 ETH_ALEN)))
423 return -EINVAL;
424
425 if (wiphy->addresses)
426 memcpy(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN);
427
420 /* sanity check ifmodes */ 428 /* sanity check ifmodes */
421 WARN_ON(!ifmodes); 429 WARN_ON(!ifmodes);
422 ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1; 430 ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1;
@@ -476,7 +484,7 @@ int wiphy_register(struct wiphy *wiphy)
476 /* set up regulatory info */ 484 /* set up regulatory info */
477 wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE); 485 wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
478 486
479 list_add(&rdev->list, &cfg80211_rdev_list); 487 list_add_rcu(&rdev->list, &cfg80211_rdev_list);
480 cfg80211_rdev_list_generation++; 488 cfg80211_rdev_list_generation++;
481 489
482 mutex_unlock(&cfg80211_mutex); 490 mutex_unlock(&cfg80211_mutex);
@@ -553,7 +561,8 @@ void wiphy_unregister(struct wiphy *wiphy)
553 * it impossible to find from userspace. 561 * it impossible to find from userspace.
554 */ 562 */
555 debugfs_remove_recursive(rdev->wiphy.debugfsdir); 563 debugfs_remove_recursive(rdev->wiphy.debugfsdir);
556 list_del(&rdev->list); 564 list_del_rcu(&rdev->list);
565 synchronize_rcu();
557 566
558 /* 567 /*
559 * Try to grab rdev->mtx. If a command is still in progress, 568 * Try to grab rdev->mtx. If a command is still in progress,
@@ -669,7 +678,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
669 INIT_LIST_HEAD(&wdev->event_list); 678 INIT_LIST_HEAD(&wdev->event_list);
670 spin_lock_init(&wdev->event_lock); 679 spin_lock_init(&wdev->event_lock);
671 mutex_lock(&rdev->devlist_mtx); 680 mutex_lock(&rdev->devlist_mtx);
672 list_add(&wdev->list, &rdev->netdev_list); 681 list_add_rcu(&wdev->list, &rdev->netdev_list);
673 rdev->devlist_generation++; 682 rdev->devlist_generation++;
674 /* can only change netns with wiphy */ 683 /* can only change netns with wiphy */
675 dev->features |= NETIF_F_NETNS_LOCAL; 684 dev->features |= NETIF_F_NETNS_LOCAL;
@@ -781,13 +790,21 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
781 */ 790 */
782 if (!list_empty(&wdev->list)) { 791 if (!list_empty(&wdev->list)) {
783 sysfs_remove_link(&dev->dev.kobj, "phy80211"); 792 sysfs_remove_link(&dev->dev.kobj, "phy80211");
784 list_del_init(&wdev->list); 793 list_del_rcu(&wdev->list);
785 rdev->devlist_generation++; 794 rdev->devlist_generation++;
786#ifdef CONFIG_CFG80211_WEXT 795#ifdef CONFIG_CFG80211_WEXT
787 kfree(wdev->wext.keys); 796 kfree(wdev->wext.keys);
788#endif 797#endif
789 } 798 }
790 mutex_unlock(&rdev->devlist_mtx); 799 mutex_unlock(&rdev->devlist_mtx);
800 /*
801 * synchronise (so that we won't find this netdev
802 * from other code any more) and then clear the list
803 * head so that the above code can safely check for
804 * !list_empty() to avoid double-cleanup.
805 */
806 synchronize_rcu();
807 INIT_LIST_HEAD(&wdev->list);
791 break; 808 break;
792 case NETDEV_PRE_UP: 809 case NETDEV_PRE_UP:
793 if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) 810 if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 4ef3efc9410..c326a667022 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Wireless configuration interface internals. 2 * Wireless configuration interface internals.
3 * 3 *
4 * Copyright 2006-2009 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6#ifndef __NET_WIRELESS_CORE_H 6#ifndef __NET_WIRELESS_CORE_H
7#define __NET_WIRELESS_CORE_H 7#define __NET_WIRELESS_CORE_H
@@ -48,6 +48,7 @@ struct cfg80211_registered_device {
48 48
49 /* associate netdev list */ 49 /* associate netdev list */
50 struct mutex devlist_mtx; 50 struct mutex devlist_mtx;
51 /* protected by devlist_mtx or RCU */
51 struct list_head netdev_list; 52 struct list_head netdev_list;
52 int devlist_generation; 53 int devlist_generation;
53 int opencount; /* also protected by devlist_mtx */ 54 int opencount; /* also protected by devlist_mtx */
@@ -111,7 +112,8 @@ struct cfg80211_internal_bss {
111 unsigned long ts; 112 unsigned long ts;
112 struct kref ref; 113 struct kref ref;
113 atomic_t hold; 114 atomic_t hold;
114 bool ies_allocated; 115 bool beacon_ies_allocated;
116 bool proberesp_ies_allocated;
115 117
116 /* must be last because of priv member */ 118 /* must be last because of priv member */
117 struct cfg80211_bss pub; 119 struct cfg80211_bss pub;
@@ -374,10 +376,15 @@ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
374struct ieee80211_channel * 376struct ieee80211_channel *
375rdev_fixed_channel(struct cfg80211_registered_device *rdev, 377rdev_fixed_channel(struct cfg80211_registered_device *rdev,
376 struct wireless_dev *for_wdev); 378 struct wireless_dev *for_wdev);
379struct ieee80211_channel *
380rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
381 int freq, enum nl80211_channel_type channel_type);
377int rdev_set_freq(struct cfg80211_registered_device *rdev, 382int rdev_set_freq(struct cfg80211_registered_device *rdev,
378 struct wireless_dev *for_wdev, 383 struct wireless_dev *for_wdev,
379 int freq, enum nl80211_channel_type channel_type); 384 int freq, enum nl80211_channel_type channel_type);
380 385
386u16 cfg80211_calculate_bitrate(struct rate_info *rate);
387
381#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS 388#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
382#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond) 389#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
383#else 390#else
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
new file mode 100644
index 00000000000..a2fc3a09ccd
--- /dev/null
+++ b/net/wireless/db.txt
@@ -0,0 +1,17 @@
1#
2# This file is a placeholder to prevent accidental build breakage if someone
3# enables CONFIG_CFG80211_INTERNAL_REGDB. Almost no one actually needs to
4# enable that build option.
5#
6# You should be using CRDA instead. It is even better if you use the CRDA
7# package provided by your distribution, since they will probably keep it
8# up-to-date on your behalf.
9#
10# If you _really_ intend to use CONFIG_CFG80211_INTERNAL_REGDB then you will
11# need to replace this file with one containing appropriately formatted
12# regulatory rules that cover the regulatory domains you will be using. Your
13# best option is to extract the db.txt file from the wireless-regdb git
14# repository:
15#
16# git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-regdb.git
17#
diff --git a/net/wireless/genregdb.awk b/net/wireless/genregdb.awk
new file mode 100644
index 00000000000..3cc9e69880a
--- /dev/null
+++ b/net/wireless/genregdb.awk
@@ -0,0 +1,118 @@
1#!/usr/bin/awk -f
2#
3# genregdb.awk -- generate regdb.c from db.txt
4#
5# Actually, it reads from stdin (presumed to be db.txt) and writes
6# to stdout (presumed to be regdb.c), but close enough...
7#
8# Copyright 2009 John W. Linville <linville@tuxdriver.com>
9#
10# This program is free software; you can redistribute it and/or modify
11# it under the terms of the GNU General Public License version 2 as
12# published by the Free Software Foundation.
13#
14
15BEGIN {
16 active = 0
17 rules = 0;
18 print "/*"
19 print " * DO NOT EDIT -- file generated from data in db.txt"
20 print " */"
21 print ""
22 print "#include <linux/nl80211.h>"
23 print "#include <net/cfg80211.h>"
24 print ""
25 regdb = "const struct ieee80211_regdomain *reg_regdb[] = {\n"
26}
27
28/^[ \t]*#/ {
29 # Ignore
30}
31
32!active && /^[ \t]*$/ {
33 # Ignore
34}
35
36!active && /country/ {
37 country=$2
38 sub(/:/, "", country)
39 printf "static const struct ieee80211_regdomain regdom_%s = {\n", country
40 printf "\t.alpha2 = \"%s\",\n", country
41 printf "\t.reg_rules = {\n"
42 active = 1
43 regdb = regdb "\t&regdom_" country ",\n"
44}
45
46active && /^[ \t]*\(/ {
47 start = $1
48 sub(/\(/, "", start)
49 end = $3
50 bw = $5
51 sub(/\),/, "", bw)
52 gain = $6
53 sub(/\(/, "", gain)
54 sub(/,/, "", gain)
55 power = $7
56 sub(/\)/, "", power)
57 sub(/,/, "", power)
58 # power might be in mW...
59 units = $8
60 sub(/\)/, "", units)
61 sub(/,/, "", units)
62 if (units == "mW") {
63 if (power == 100) {
64 power = 20
65 } else if (power == 200) {
66 power = 23
67 } else if (power == 500) {
68 power = 27
69 } else if (power == 1000) {
70 power = 30
71 } else {
72 print "Unknown power value in database!"
73 }
74 }
75 flagstr = ""
76 for (i=8; i<=NF; i++)
77 flagstr = flagstr $i
78 split(flagstr, flagarray, ",")
79 flags = ""
80 for (arg in flagarray) {
81 if (flagarray[arg] == "NO-OFDM") {
82 flags = flags "\n\t\t\tNL80211_RRF_NO_OFDM | "
83 } else if (flagarray[arg] == "NO-CCK") {
84 flags = flags "\n\t\t\tNL80211_RRF_NO_CCK | "
85 } else if (flagarray[arg] == "NO-INDOOR") {
86 flags = flags "\n\t\t\tNL80211_RRF_NO_INDOOR | "
87 } else if (flagarray[arg] == "NO-OUTDOOR") {
88 flags = flags "\n\t\t\tNL80211_RRF_NO_OUTDOOR | "
89 } else if (flagarray[arg] == "DFS") {
90 flags = flags "\n\t\t\tNL80211_RRF_DFS | "
91 } else if (flagarray[arg] == "PTP-ONLY") {
92 flags = flags "\n\t\t\tNL80211_RRF_PTP_ONLY | "
93 } else if (flagarray[arg] == "PTMP-ONLY") {
94 flags = flags "\n\t\t\tNL80211_RRF_PTMP_ONLY | "
95 } else if (flagarray[arg] == "PASSIVE-SCAN") {
96 flags = flags "\n\t\t\tNL80211_RRF_PASSIVE_SCAN | "
97 } else if (flagarray[arg] == "NO-IBSS") {
98 flags = flags "\n\t\t\tNL80211_RRF_NO_IBSS | "
99 }
100 }
101 flags = flags "0"
102 printf "\t\tREG_RULE(%d, %d, %d, %d, %d, %s),\n", start, end, bw, gain, power, flags
103 rules++
104}
105
106active && /^[ \t]*$/ {
107 active = 0
108 printf "\t},\n"
109 printf "\t.n_reg_rules = %d\n", rules
110 printf "};\n\n"
111 rules = 0;
112}
113
114END {
115 print regdb "};"
116 print ""
117 print "int reg_regdb_size = ARRAY_SIZE(reg_regdb);"
118}
diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c
index 2301dc1edc4..b7fa31d5fd1 100644
--- a/net/wireless/lib80211_crypt_ccmp.c
+++ b/net/wireless/lib80211_crypt_ccmp.c
@@ -237,7 +237,6 @@ static int lib80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
237 return -1; 237 return -1;
238 238
239 pos = skb->data + hdr_len + CCMP_HDR_LEN; 239 pos = skb->data + hdr_len + CCMP_HDR_LEN;
240 mic = skb_put(skb, CCMP_MIC_LEN);
241 hdr = (struct ieee80211_hdr *)skb->data; 240 hdr = (struct ieee80211_hdr *)skb->data;
242 ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0); 241 ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0);
243 242
@@ -257,6 +256,7 @@ static int lib80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
257 pos += len; 256 pos += len;
258 } 257 }
259 258
259 mic = skb_put(skb, CCMP_MIC_LEN);
260 for (i = 0; i < CCMP_MIC_LEN; i++) 260 for (i = 0; i < CCMP_MIC_LEN; i++)
261 mic[i] = b[i] ^ s0[i]; 261 mic[i] = b[i] ^ s0[i];
262 262
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index c36287399d7..8cbdb32ff31 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -36,6 +36,8 @@ MODULE_AUTHOR("Jouni Malinen");
36MODULE_DESCRIPTION("lib80211 crypt: TKIP"); 36MODULE_DESCRIPTION("lib80211 crypt: TKIP");
37MODULE_LICENSE("GPL"); 37MODULE_LICENSE("GPL");
38 38
39#define TKIP_HDR_LEN 8
40
39struct lib80211_tkip_data { 41struct lib80211_tkip_data {
40#define TKIP_KEY_LEN 32 42#define TKIP_KEY_LEN 32
41 u8 key[TKIP_KEY_LEN]; 43 u8 key[TKIP_KEY_LEN];
@@ -314,13 +316,12 @@ static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
314 u8 * rc4key, int keylen, void *priv) 316 u8 * rc4key, int keylen, void *priv)
315{ 317{
316 struct lib80211_tkip_data *tkey = priv; 318 struct lib80211_tkip_data *tkey = priv;
317 int len;
318 u8 *pos; 319 u8 *pos;
319 struct ieee80211_hdr *hdr; 320 struct ieee80211_hdr *hdr;
320 321
321 hdr = (struct ieee80211_hdr *)skb->data; 322 hdr = (struct ieee80211_hdr *)skb->data;
322 323
323 if (skb_headroom(skb) < 8 || skb->len < hdr_len) 324 if (skb_headroom(skb) < TKIP_HDR_LEN || skb->len < hdr_len)
324 return -1; 325 return -1;
325 326
326 if (rc4key == NULL || keylen < 16) 327 if (rc4key == NULL || keylen < 16)
@@ -333,9 +334,8 @@ static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
333 } 334 }
334 tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16); 335 tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16);
335 336
336 len = skb->len - hdr_len; 337 pos = skb_push(skb, TKIP_HDR_LEN);
337 pos = skb_push(skb, 8); 338 memmove(pos, pos + TKIP_HDR_LEN, hdr_len);
338 memmove(pos, pos + 8, hdr_len);
339 pos += hdr_len; 339 pos += hdr_len;
340 340
341 *pos++ = *rc4key; 341 *pos++ = *rc4key;
@@ -353,7 +353,7 @@ static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
353 tkey->tx_iv32++; 353 tkey->tx_iv32++;
354 } 354 }
355 355
356 return 8; 356 return TKIP_HDR_LEN;
357} 357}
358 358
359static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) 359static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
@@ -384,9 +384,8 @@ static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
384 if ((lib80211_tkip_hdr(skb, hdr_len, rc4key, 16, priv)) < 0) 384 if ((lib80211_tkip_hdr(skb, hdr_len, rc4key, 16, priv)) < 0)
385 return -1; 385 return -1;
386 386
387 icv = skb_put(skb, 4);
388
389 crc = ~crc32_le(~0, pos, len); 387 crc = ~crc32_le(~0, pos, len);
388 icv = skb_put(skb, 4);
390 icv[0] = crc; 389 icv[0] = crc;
391 icv[1] = crc >> 8; 390 icv[1] = crc >> 8;
392 icv[2] = crc >> 16; 391 icv[2] = crc >> 16;
@@ -434,7 +433,7 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
434 return -1; 433 return -1;
435 } 434 }
436 435
437 if (skb->len < hdr_len + 8 + 4) 436 if (skb->len < hdr_len + TKIP_HDR_LEN + 4)
438 return -1; 437 return -1;
439 438
440 pos = skb->data + hdr_len; 439 pos = skb->data + hdr_len;
@@ -462,7 +461,7 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
462 } 461 }
463 iv16 = (pos[0] << 8) | pos[2]; 462 iv16 = (pos[0] << 8) | pos[2];
464 iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24); 463 iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24);
465 pos += 8; 464 pos += TKIP_HDR_LEN;
466 465
467 if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) { 466 if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) {
468#ifdef CONFIG_LIB80211_DEBUG 467#ifdef CONFIG_LIB80211_DEBUG
@@ -523,8 +522,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
523 tkey->rx_iv16_new = iv16; 522 tkey->rx_iv16_new = iv16;
524 523
525 /* Remove IV and ICV */ 524 /* Remove IV and ICV */
526 memmove(skb->data + 8, skb->data, hdr_len); 525 memmove(skb->data + TKIP_HDR_LEN, skb->data, hdr_len);
527 skb_pull(skb, 8); 526 skb_pull(skb, TKIP_HDR_LEN);
528 skb_trim(skb, skb->len - 4); 527 skb_trim(skb, skb->len - 4);
529 528
530 return keyidx; 529 return keyidx;
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 82e6002c8d6..94d151f6f73 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -148,22 +148,23 @@ void __cfg80211_send_deauth(struct net_device *dev,
148 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; 148 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
149 const u8 *bssid = mgmt->bssid; 149 const u8 *bssid = mgmt->bssid;
150 int i; 150 int i;
151 bool found = false;
151 152
152 ASSERT_WDEV_LOCK(wdev); 153 ASSERT_WDEV_LOCK(wdev);
153 154
154 nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL);
155
156 if (wdev->current_bss && 155 if (wdev->current_bss &&
157 memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { 156 memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
158 cfg80211_unhold_bss(wdev->current_bss); 157 cfg80211_unhold_bss(wdev->current_bss);
159 cfg80211_put_bss(&wdev->current_bss->pub); 158 cfg80211_put_bss(&wdev->current_bss->pub);
160 wdev->current_bss = NULL; 159 wdev->current_bss = NULL;
160 found = true;
161 } else for (i = 0; i < MAX_AUTH_BSSES; i++) { 161 } else for (i = 0; i < MAX_AUTH_BSSES; i++) {
162 if (wdev->auth_bsses[i] && 162 if (wdev->auth_bsses[i] &&
163 memcmp(wdev->auth_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) { 163 memcmp(wdev->auth_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) {
164 cfg80211_unhold_bss(wdev->auth_bsses[i]); 164 cfg80211_unhold_bss(wdev->auth_bsses[i]);
165 cfg80211_put_bss(&wdev->auth_bsses[i]->pub); 165 cfg80211_put_bss(&wdev->auth_bsses[i]->pub);
166 wdev->auth_bsses[i] = NULL; 166 wdev->auth_bsses[i] = NULL;
167 found = true;
167 break; 168 break;
168 } 169 }
169 if (wdev->authtry_bsses[i] && 170 if (wdev->authtry_bsses[i] &&
@@ -171,10 +172,16 @@ void __cfg80211_send_deauth(struct net_device *dev,
171 cfg80211_unhold_bss(wdev->authtry_bsses[i]); 172 cfg80211_unhold_bss(wdev->authtry_bsses[i]);
172 cfg80211_put_bss(&wdev->authtry_bsses[i]->pub); 173 cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
173 wdev->authtry_bsses[i] = NULL; 174 wdev->authtry_bsses[i] = NULL;
175 found = true;
174 break; 176 break;
175 } 177 }
176 } 178 }
177 179
180 if (!found)
181 return;
182
183 nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL);
184
178 if (wdev->sme_state == CFG80211_SME_CONNECTED) { 185 if (wdev->sme_state == CFG80211_SME_CONNECTED) {
179 u16 reason_code; 186 u16 reason_code;
180 bool from_ap; 187 bool from_ap;
@@ -684,3 +691,40 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
684 } 691 }
685 } 692 }
686} 693}
694
695void cfg80211_ready_on_channel(struct net_device *dev, u64 cookie,
696 struct ieee80211_channel *chan,
697 enum nl80211_channel_type channel_type,
698 unsigned int duration, gfp_t gfp)
699{
700 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
701 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
702
703 nl80211_send_remain_on_channel(rdev, dev, cookie, chan, channel_type,
704 duration, gfp);
705}
706EXPORT_SYMBOL(cfg80211_ready_on_channel);
707
708void cfg80211_remain_on_channel_expired(struct net_device *dev,
709 u64 cookie,
710 struct ieee80211_channel *chan,
711 enum nl80211_channel_type channel_type,
712 gfp_t gfp)
713{
714 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
715 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
716
717 nl80211_send_remain_on_channel_cancel(rdev, dev, cookie, chan,
718 channel_type, gfp);
719}
720EXPORT_SYMBOL(cfg80211_remain_on_channel_expired);
721
722void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
723 struct station_info *sinfo, gfp_t gfp)
724{
725 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
726 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
727
728 nl80211_send_sta_event(rdev, dev, mac_addr, sinfo, gfp);
729}
730EXPORT_SYMBOL(cfg80211_new_sta);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index a6028433e3a..a001ea32cb7 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -58,7 +58,7 @@ static int get_rdev_dev_by_info_ifindex(struct genl_info *info,
58} 58}
59 59
60/* policy for the attributes */ 60/* policy for the attributes */
61static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = { 61static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
62 [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, 62 [NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
63 [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING, 63 [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING,
64 .len = 20-1 }, 64 .len = 20-1 },
@@ -69,6 +69,7 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
69 [NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 }, 69 [NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 },
70 [NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 }, 70 [NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 },
71 [NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 }, 71 [NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 },
72 [NL80211_ATTR_WIPHY_COVERAGE_CLASS] = { .type = NLA_U8 },
72 73
73 [NL80211_ATTR_IFTYPE] = { .type = NLA_U32 }, 74 [NL80211_ATTR_IFTYPE] = { .type = NLA_U32 },
74 [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, 75 [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
@@ -141,11 +142,13 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
141 [NL80211_ATTR_4ADDR] = { .type = NLA_U8 }, 142 [NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
142 [NL80211_ATTR_PMKID] = { .type = NLA_BINARY, 143 [NL80211_ATTR_PMKID] = { .type = NLA_BINARY,
143 .len = WLAN_PMKID_LEN }, 144 .len = WLAN_PMKID_LEN },
145 [NL80211_ATTR_DURATION] = { .type = NLA_U32 },
146 [NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
147 [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
144}; 148};
145 149
146/* policy for the attributes */ 150/* policy for the attributes */
147static struct nla_policy 151static const struct nla_policy nl80211_key_policy[NL80211_KEY_MAX + 1] = {
148nl80211_key_policy[NL80211_KEY_MAX + 1] __read_mostly = {
149 [NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN }, 152 [NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN },
150 [NL80211_KEY_IDX] = { .type = NLA_U8 }, 153 [NL80211_KEY_IDX] = { .type = NLA_U8 },
151 [NL80211_KEY_CIPHER] = { .type = NLA_U32 }, 154 [NL80211_KEY_CIPHER] = { .type = NLA_U32 },
@@ -442,6 +445,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
442 dev->wiphy.frag_threshold); 445 dev->wiphy.frag_threshold);
443 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD, 446 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
444 dev->wiphy.rts_threshold); 447 dev->wiphy.rts_threshold);
448 NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
449 dev->wiphy.coverage_class);
445 450
446 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, 451 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
447 dev->wiphy.max_scan_ssids); 452 dev->wiphy.max_scan_ssids);
@@ -569,6 +574,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
569 CMD(set_pmksa, SET_PMKSA); 574 CMD(set_pmksa, SET_PMKSA);
570 CMD(del_pmksa, DEL_PMKSA); 575 CMD(del_pmksa, DEL_PMKSA);
571 CMD(flush_pmksa, FLUSH_PMKSA); 576 CMD(flush_pmksa, FLUSH_PMKSA);
577 CMD(remain_on_channel, REMAIN_ON_CHANNEL);
578 CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
572 if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) { 579 if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
573 i++; 580 i++;
574 NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS); 581 NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS);
@@ -681,6 +688,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
681 u32 changed; 688 u32 changed;
682 u8 retry_short = 0, retry_long = 0; 689 u8 retry_short = 0, retry_long = 0;
683 u32 frag_threshold = 0, rts_threshold = 0; 690 u32 frag_threshold = 0, rts_threshold = 0;
691 u8 coverage_class = 0;
684 692
685 rtnl_lock(); 693 rtnl_lock();
686 694
@@ -803,9 +811,16 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
803 changed |= WIPHY_PARAM_RTS_THRESHOLD; 811 changed |= WIPHY_PARAM_RTS_THRESHOLD;
804 } 812 }
805 813
814 if (info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]) {
815 coverage_class = nla_get_u8(
816 info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]);
817 changed |= WIPHY_PARAM_COVERAGE_CLASS;
818 }
819
806 if (changed) { 820 if (changed) {
807 u8 old_retry_short, old_retry_long; 821 u8 old_retry_short, old_retry_long;
808 u32 old_frag_threshold, old_rts_threshold; 822 u32 old_frag_threshold, old_rts_threshold;
823 u8 old_coverage_class;
809 824
810 if (!rdev->ops->set_wiphy_params) { 825 if (!rdev->ops->set_wiphy_params) {
811 result = -EOPNOTSUPP; 826 result = -EOPNOTSUPP;
@@ -816,6 +831,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
816 old_retry_long = rdev->wiphy.retry_long; 831 old_retry_long = rdev->wiphy.retry_long;
817 old_frag_threshold = rdev->wiphy.frag_threshold; 832 old_frag_threshold = rdev->wiphy.frag_threshold;
818 old_rts_threshold = rdev->wiphy.rts_threshold; 833 old_rts_threshold = rdev->wiphy.rts_threshold;
834 old_coverage_class = rdev->wiphy.coverage_class;
819 835
820 if (changed & WIPHY_PARAM_RETRY_SHORT) 836 if (changed & WIPHY_PARAM_RETRY_SHORT)
821 rdev->wiphy.retry_short = retry_short; 837 rdev->wiphy.retry_short = retry_short;
@@ -825,6 +841,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
825 rdev->wiphy.frag_threshold = frag_threshold; 841 rdev->wiphy.frag_threshold = frag_threshold;
826 if (changed & WIPHY_PARAM_RTS_THRESHOLD) 842 if (changed & WIPHY_PARAM_RTS_THRESHOLD)
827 rdev->wiphy.rts_threshold = rts_threshold; 843 rdev->wiphy.rts_threshold = rts_threshold;
844 if (changed & WIPHY_PARAM_COVERAGE_CLASS)
845 rdev->wiphy.coverage_class = coverage_class;
828 846
829 result = rdev->ops->set_wiphy_params(&rdev->wiphy, changed); 847 result = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
830 if (result) { 848 if (result) {
@@ -832,6 +850,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
832 rdev->wiphy.retry_long = old_retry_long; 850 rdev->wiphy.retry_long = old_retry_long;
833 rdev->wiphy.frag_threshold = old_frag_threshold; 851 rdev->wiphy.frag_threshold = old_frag_threshold;
834 rdev->wiphy.rts_threshold = old_rts_threshold; 852 rdev->wiphy.rts_threshold = old_rts_threshold;
853 rdev->wiphy.coverage_class = old_coverage_class;
835 } 854 }
836 } 855 }
837 856
@@ -1637,42 +1656,9 @@ static int parse_station_flags(struct genl_info *info,
1637 return 0; 1656 return 0;
1638} 1657}
1639 1658
1640static u16 nl80211_calculate_bitrate(struct rate_info *rate)
1641{
1642 int modulation, streams, bitrate;
1643
1644 if (!(rate->flags & RATE_INFO_FLAGS_MCS))
1645 return rate->legacy;
1646
1647 /* the formula below does only work for MCS values smaller than 32 */
1648 if (rate->mcs >= 32)
1649 return 0;
1650
1651 modulation = rate->mcs & 7;
1652 streams = (rate->mcs >> 3) + 1;
1653
1654 bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ?
1655 13500000 : 6500000;
1656
1657 if (modulation < 4)
1658 bitrate *= (modulation + 1);
1659 else if (modulation == 4)
1660 bitrate *= (modulation + 2);
1661 else
1662 bitrate *= (modulation + 3);
1663
1664 bitrate *= streams;
1665
1666 if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
1667 bitrate = (bitrate / 9) * 10;
1668
1669 /* do NOT round down here */
1670 return (bitrate + 50000) / 100000;
1671}
1672
1673static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, 1659static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
1674 int flags, struct net_device *dev, 1660 int flags, struct net_device *dev,
1675 u8 *mac_addr, struct station_info *sinfo) 1661 const u8 *mac_addr, struct station_info *sinfo)
1676{ 1662{
1677 void *hdr; 1663 void *hdr;
1678 struct nlattr *sinfoattr, *txrate; 1664 struct nlattr *sinfoattr, *txrate;
@@ -1716,8 +1702,8 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
1716 if (!txrate) 1702 if (!txrate)
1717 goto nla_put_failure; 1703 goto nla_put_failure;
1718 1704
1719 /* nl80211_calculate_bitrate will return 0 for mcs >= 32 */ 1705 /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
1720 bitrate = nl80211_calculate_bitrate(&sinfo->txrate); 1706 bitrate = cfg80211_calculate_bitrate(&sinfo->txrate);
1721 if (bitrate > 0) 1707 if (bitrate > 0)
1722 NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate); 1708 NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
1723 1709
@@ -2514,8 +2500,7 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
2514 return err; 2500 return err;
2515} 2501}
2516 2502
2517static const struct nla_policy 2503static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
2518 reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
2519 [NL80211_ATTR_REG_RULE_FLAGS] = { .type = NLA_U32 }, 2504 [NL80211_ATTR_REG_RULE_FLAGS] = { .type = NLA_U32 },
2520 [NL80211_ATTR_FREQ_RANGE_START] = { .type = NLA_U32 }, 2505 [NL80211_ATTR_FREQ_RANGE_START] = { .type = NLA_U32 },
2521 [NL80211_ATTR_FREQ_RANGE_END] = { .type = NLA_U32 }, 2506 [NL80211_ATTR_FREQ_RANGE_END] = { .type = NLA_U32 },
@@ -2583,12 +2568,6 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
2583 2568
2584 data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]); 2569 data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
2585 2570
2586#ifdef CONFIG_WIRELESS_OLD_REGULATORY
2587 /* We ignore world regdom requests with the old regdom setup */
2588 if (is_world_regdom(data))
2589 return -EINVAL;
2590#endif
2591
2592 r = regulatory_hint_user(data); 2571 r = regulatory_hint_user(data);
2593 2572
2594 return r; 2573 return r;
@@ -2690,8 +2669,7 @@ do {\
2690 } \ 2669 } \
2691} while (0);\ 2670} while (0);\
2692 2671
2693static struct nla_policy 2672static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] = {
2694nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] __read_mostly = {
2695 [NL80211_MESHCONF_RETRY_TIMEOUT] = { .type = NLA_U16 }, 2673 [NL80211_MESHCONF_RETRY_TIMEOUT] = { .type = NLA_U16 },
2696 [NL80211_MESHCONF_CONFIRM_TIMEOUT] = { .type = NLA_U16 }, 2674 [NL80211_MESHCONF_CONFIRM_TIMEOUT] = { .type = NLA_U16 },
2697 [NL80211_MESHCONF_HOLDING_TIMEOUT] = { .type = NLA_U16 }, 2675 [NL80211_MESHCONF_HOLDING_TIMEOUT] = { .type = NLA_U16 },
@@ -3182,6 +3160,10 @@ static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
3182 NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS, 3160 NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS,
3183 res->len_information_elements, 3161 res->len_information_elements,
3184 res->information_elements); 3162 res->information_elements);
3163 if (res->beacon_ies && res->len_beacon_ies &&
3164 res->beacon_ies != res->information_elements)
3165 NLA_PUT(msg, NL80211_BSS_BEACON_IES,
3166 res->len_beacon_ies, res->beacon_ies);
3185 if (res->tsf) 3167 if (res->tsf)
3186 NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf); 3168 NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf);
3187 if (res->beacon_interval) 3169 if (res->beacon_interval)
@@ -3586,6 +3568,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
3586{ 3568{
3587 struct cfg80211_registered_device *rdev; 3569 struct cfg80211_registered_device *rdev;
3588 struct net_device *dev; 3570 struct net_device *dev;
3571 struct wireless_dev *wdev;
3589 struct cfg80211_crypto_settings crypto; 3572 struct cfg80211_crypto_settings crypto;
3590 struct ieee80211_channel *chan, *fixedchan; 3573 struct ieee80211_channel *chan, *fixedchan;
3591 const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL; 3574 const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL;
@@ -3631,7 +3614,8 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
3631 } 3614 }
3632 3615
3633 mutex_lock(&rdev->devlist_mtx); 3616 mutex_lock(&rdev->devlist_mtx);
3634 fixedchan = rdev_fixed_channel(rdev, NULL); 3617 wdev = dev->ieee80211_ptr;
3618 fixedchan = rdev_fixed_channel(rdev, wdev);
3635 if (fixedchan && chan != fixedchan) { 3619 if (fixedchan && chan != fixedchan) {
3636 err = -EBUSY; 3620 err = -EBUSY;
3637 mutex_unlock(&rdev->devlist_mtx); 3621 mutex_unlock(&rdev->devlist_mtx);
@@ -4322,6 +4306,245 @@ static int nl80211_flush_pmksa(struct sk_buff *skb, struct genl_info *info)
4322 4306
4323} 4307}
4324 4308
4309static int nl80211_remain_on_channel(struct sk_buff *skb,
4310 struct genl_info *info)
4311{
4312 struct cfg80211_registered_device *rdev;
4313 struct net_device *dev;
4314 struct ieee80211_channel *chan;
4315 struct sk_buff *msg;
4316 void *hdr;
4317 u64 cookie;
4318 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
4319 u32 freq, duration;
4320 int err;
4321
4322 if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
4323 !info->attrs[NL80211_ATTR_DURATION])
4324 return -EINVAL;
4325
4326 duration = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]);
4327
4328 /*
4329 * We should be on that channel for at least one jiffie,
4330 * and more than 5 seconds seems excessive.
4331 */
4332 if (!duration || !msecs_to_jiffies(duration) || duration > 5000)
4333 return -EINVAL;
4334
4335 rtnl_lock();
4336
4337 err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
4338 if (err)
4339 goto unlock_rtnl;
4340
4341 if (!rdev->ops->remain_on_channel) {
4342 err = -EOPNOTSUPP;
4343 goto out;
4344 }
4345
4346 if (!netif_running(dev)) {
4347 err = -ENETDOWN;
4348 goto out;
4349 }
4350
4351 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
4352 channel_type = nla_get_u32(
4353 info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
4354 if (channel_type != NL80211_CHAN_NO_HT &&
4355 channel_type != NL80211_CHAN_HT20 &&
4356 channel_type != NL80211_CHAN_HT40PLUS &&
4357 channel_type != NL80211_CHAN_HT40MINUS)
4358 err = -EINVAL;
4359 goto out;
4360 }
4361
4362 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
4363 chan = rdev_freq_to_chan(rdev, freq, channel_type);
4364 if (chan == NULL) {
4365 err = -EINVAL;
4366 goto out;
4367 }
4368
4369 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
4370 if (!msg) {
4371 err = -ENOMEM;
4372 goto out;
4373 }
4374
4375 hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
4376 NL80211_CMD_REMAIN_ON_CHANNEL);
4377
4378 if (IS_ERR(hdr)) {
4379 err = PTR_ERR(hdr);
4380 goto free_msg;
4381 }
4382
4383 err = rdev->ops->remain_on_channel(&rdev->wiphy, dev, chan,
4384 channel_type, duration, &cookie);
4385
4386 if (err)
4387 goto free_msg;
4388
4389 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
4390
4391 genlmsg_end(msg, hdr);
4392 err = genlmsg_reply(msg, info);
4393 goto out;
4394
4395 nla_put_failure:
4396 err = -ENOBUFS;
4397 free_msg:
4398 nlmsg_free(msg);
4399 out:
4400 cfg80211_unlock_rdev(rdev);
4401 dev_put(dev);
4402 unlock_rtnl:
4403 rtnl_unlock();
4404 return err;
4405}
4406
4407static int nl80211_cancel_remain_on_channel(struct sk_buff *skb,
4408 struct genl_info *info)
4409{
4410 struct cfg80211_registered_device *rdev;
4411 struct net_device *dev;
4412 u64 cookie;
4413 int err;
4414
4415 if (!info->attrs[NL80211_ATTR_COOKIE])
4416 return -EINVAL;
4417
4418 rtnl_lock();
4419
4420 err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
4421 if (err)
4422 goto unlock_rtnl;
4423
4424 if (!rdev->ops->cancel_remain_on_channel) {
4425 err = -EOPNOTSUPP;
4426 goto out;
4427 }
4428
4429 if (!netif_running(dev)) {
4430 err = -ENETDOWN;
4431 goto out;
4432 }
4433
4434 cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);
4435
4436 err = rdev->ops->cancel_remain_on_channel(&rdev->wiphy, dev, cookie);
4437
4438 out:
4439 cfg80211_unlock_rdev(rdev);
4440 dev_put(dev);
4441 unlock_rtnl:
4442 rtnl_unlock();
4443 return err;
4444}
4445
4446static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
4447 u8 *rates, u8 rates_len)
4448{
4449 u8 i;
4450 u32 mask = 0;
4451
4452 for (i = 0; i < rates_len; i++) {
4453 int rate = (rates[i] & 0x7f) * 5;
4454 int ridx;
4455 for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
4456 struct ieee80211_rate *srate =
4457 &sband->bitrates[ridx];
4458 if (rate == srate->bitrate) {
4459 mask |= 1 << ridx;
4460 break;
4461 }
4462 }
4463 if (ridx == sband->n_bitrates)
4464 return 0; /* rate not found */
4465 }
4466
4467 return mask;
4468}
4469
4470static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
4471 [NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
4472 .len = NL80211_MAX_SUPP_RATES },
4473};
4474
4475static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
4476 struct genl_info *info)
4477{
4478 struct nlattr *tb[NL80211_TXRATE_MAX + 1];
4479 struct cfg80211_registered_device *rdev;
4480 struct cfg80211_bitrate_mask mask;
4481 int err, rem, i;
4482 struct net_device *dev;
4483 struct nlattr *tx_rates;
4484 struct ieee80211_supported_band *sband;
4485
4486 if (info->attrs[NL80211_ATTR_TX_RATES] == NULL)
4487 return -EINVAL;
4488
4489 rtnl_lock();
4490
4491 err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
4492 if (err)
4493 goto unlock_rtnl;
4494
4495 if (!rdev->ops->set_bitrate_mask) {
4496 err = -EOPNOTSUPP;
4497 goto unlock;
4498 }
4499
4500 memset(&mask, 0, sizeof(mask));
4501 /* Default to all rates enabled */
4502 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
4503 sband = rdev->wiphy.bands[i];
4504 mask.control[i].legacy =
4505 sband ? (1 << sband->n_bitrates) - 1 : 0;
4506 }
4507
4508 /*
4509 * The nested attribute uses enum nl80211_band as the index. This maps
4510 * directly to the enum ieee80211_band values used in cfg80211.
4511 */
4512 nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem)
4513 {
4514 enum ieee80211_band band = nla_type(tx_rates);
4515 if (band < 0 || band >= IEEE80211_NUM_BANDS) {
4516 err = -EINVAL;
4517 goto unlock;
4518 }
4519 sband = rdev->wiphy.bands[band];
4520 if (sband == NULL) {
4521 err = -EINVAL;
4522 goto unlock;
4523 }
4524 nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
4525 nla_len(tx_rates), nl80211_txattr_policy);
4526 if (tb[NL80211_TXRATE_LEGACY]) {
4527 mask.control[band].legacy = rateset_to_mask(
4528 sband,
4529 nla_data(tb[NL80211_TXRATE_LEGACY]),
4530 nla_len(tb[NL80211_TXRATE_LEGACY]));
4531 if (mask.control[band].legacy == 0) {
4532 err = -EINVAL;
4533 goto unlock;
4534 }
4535 }
4536 }
4537
4538 err = rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, NULL, &mask);
4539
4540 unlock:
4541 dev_put(dev);
4542 cfg80211_unlock_rdev(rdev);
4543 unlock_rtnl:
4544 rtnl_unlock();
4545 return err;
4546}
4547
4325static struct genl_ops nl80211_ops[] = { 4548static struct genl_ops nl80211_ops[] = {
4326 { 4549 {
4327 .cmd = NL80211_CMD_GET_WIPHY, 4550 .cmd = NL80211_CMD_GET_WIPHY,
@@ -4584,8 +4807,26 @@ static struct genl_ops nl80211_ops[] = {
4584 .policy = nl80211_policy, 4807 .policy = nl80211_policy,
4585 .flags = GENL_ADMIN_PERM, 4808 .flags = GENL_ADMIN_PERM,
4586 }, 4809 },
4587 4810 {
4811 .cmd = NL80211_CMD_REMAIN_ON_CHANNEL,
4812 .doit = nl80211_remain_on_channel,
4813 .policy = nl80211_policy,
4814 .flags = GENL_ADMIN_PERM,
4815 },
4816 {
4817 .cmd = NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
4818 .doit = nl80211_cancel_remain_on_channel,
4819 .policy = nl80211_policy,
4820 .flags = GENL_ADMIN_PERM,
4821 },
4822 {
4823 .cmd = NL80211_CMD_SET_TX_BITRATE_MASK,
4824 .doit = nl80211_set_tx_bitrate_mask,
4825 .policy = nl80211_policy,
4826 .flags = GENL_ADMIN_PERM,
4827 },
4588}; 4828};
4829
4589static struct genl_multicast_group nl80211_mlme_mcgrp = { 4830static struct genl_multicast_group nl80211_mlme_mcgrp = {
4590 .name = "mlme", 4831 .name = "mlme",
4591}; 4832};
@@ -5173,6 +5414,89 @@ nla_put_failure:
5173 nlmsg_free(msg); 5414 nlmsg_free(msg);
5174} 5415}
5175 5416
5417static void nl80211_send_remain_on_chan_event(
5418 int cmd, struct cfg80211_registered_device *rdev,
5419 struct net_device *netdev, u64 cookie,
5420 struct ieee80211_channel *chan,
5421 enum nl80211_channel_type channel_type,
5422 unsigned int duration, gfp_t gfp)
5423{
5424 struct sk_buff *msg;
5425 void *hdr;
5426
5427 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
5428 if (!msg)
5429 return;
5430
5431 hdr = nl80211hdr_put(msg, 0, 0, 0, cmd);
5432 if (!hdr) {
5433 nlmsg_free(msg);
5434 return;
5435 }
5436
5437 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
5438 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
5439 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq);
5440 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type);
5441 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
5442
5443 if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL)
5444 NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration);
5445
5446 if (genlmsg_end(msg, hdr) < 0) {
5447 nlmsg_free(msg);
5448 return;
5449 }
5450
5451 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
5452 nl80211_mlme_mcgrp.id, gfp);
5453 return;
5454
5455 nla_put_failure:
5456 genlmsg_cancel(msg, hdr);
5457 nlmsg_free(msg);
5458}
5459
5460void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
5461 struct net_device *netdev, u64 cookie,
5462 struct ieee80211_channel *chan,
5463 enum nl80211_channel_type channel_type,
5464 unsigned int duration, gfp_t gfp)
5465{
5466 nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL,
5467 rdev, netdev, cookie, chan,
5468 channel_type, duration, gfp);
5469}
5470
5471void nl80211_send_remain_on_channel_cancel(
5472 struct cfg80211_registered_device *rdev, struct net_device *netdev,
5473 u64 cookie, struct ieee80211_channel *chan,
5474 enum nl80211_channel_type channel_type, gfp_t gfp)
5475{
5476 nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
5477 rdev, netdev, cookie, chan,
5478 channel_type, 0, gfp);
5479}
5480
5481void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
5482 struct net_device *dev, const u8 *mac_addr,
5483 struct station_info *sinfo, gfp_t gfp)
5484{
5485 struct sk_buff *msg;
5486
5487 msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
5488 if (!msg)
5489 return;
5490
5491 if (nl80211_send_station(msg, 0, 0, 0, dev, mac_addr, sinfo) < 0) {
5492 nlmsg_free(msg);
5493 return;
5494 }
5495
5496 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
5497 nl80211_mlme_mcgrp.id, gfp);
5498}
5499
5176/* initialisation/exit functions */ 5500/* initialisation/exit functions */
5177 5501
5178int nl80211_init(void) 5502int nl80211_init(void)
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 44cc2a76a1b..14855b8fb43 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -59,4 +59,19 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
59 struct net_device *netdev, const u8 *bssid, 59 struct net_device *netdev, const u8 *bssid,
60 gfp_t gfp); 60 gfp_t gfp);
61 61
62void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
63 struct net_device *netdev,
64 u64 cookie,
65 struct ieee80211_channel *chan,
66 enum nl80211_channel_type channel_type,
67 unsigned int duration, gfp_t gfp);
68void nl80211_send_remain_on_channel_cancel(
69 struct cfg80211_registered_device *rdev, struct net_device *netdev,
70 u64 cookie, struct ieee80211_channel *chan,
71 enum nl80211_channel_type channel_type, gfp_t gfp);
72
73void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
74 struct net_device *dev, const u8 *mac_addr,
75 struct station_info *sinfo, gfp_t gfp);
76
62#endif /* __NET_WIRELESS_NL80211_H */ 77#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index f591871a7b4..1332c445d1c 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -2,6 +2,16 @@
2 * Radiotap parser 2 * Radiotap parser
3 * 3 *
4 * Copyright 2007 Andy Green <andy@warmcat.com> 4 * Copyright 2007 Andy Green <andy@warmcat.com>
5 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Alternatively, this software may be distributed under the terms of BSD
12 * license.
13 *
14 * See COPYING for more details.
5 */ 15 */
6 16
7#include <net/cfg80211.h> 17#include <net/cfg80211.h>
@@ -10,6 +20,35 @@
10 20
11/* function prototypes and related defs are in include/net/cfg80211.h */ 21/* function prototypes and related defs are in include/net/cfg80211.h */
12 22
23static const struct radiotap_align_size rtap_namespace_sizes[] = {
24 [IEEE80211_RADIOTAP_TSFT] = { .align = 8, .size = 8, },
25 [IEEE80211_RADIOTAP_FLAGS] = { .align = 1, .size = 1, },
26 [IEEE80211_RADIOTAP_RATE] = { .align = 1, .size = 1, },
27 [IEEE80211_RADIOTAP_CHANNEL] = { .align = 2, .size = 4, },
28 [IEEE80211_RADIOTAP_FHSS] = { .align = 2, .size = 2, },
29 [IEEE80211_RADIOTAP_DBM_ANTSIGNAL] = { .align = 1, .size = 1, },
30 [IEEE80211_RADIOTAP_DBM_ANTNOISE] = { .align = 1, .size = 1, },
31 [IEEE80211_RADIOTAP_LOCK_QUALITY] = { .align = 2, .size = 2, },
32 [IEEE80211_RADIOTAP_TX_ATTENUATION] = { .align = 2, .size = 2, },
33 [IEEE80211_RADIOTAP_DB_TX_ATTENUATION] = { .align = 2, .size = 2, },
34 [IEEE80211_RADIOTAP_DBM_TX_POWER] = { .align = 1, .size = 1, },
35 [IEEE80211_RADIOTAP_ANTENNA] = { .align = 1, .size = 1, },
36 [IEEE80211_RADIOTAP_DB_ANTSIGNAL] = { .align = 1, .size = 1, },
37 [IEEE80211_RADIOTAP_DB_ANTNOISE] = { .align = 1, .size = 1, },
38 [IEEE80211_RADIOTAP_RX_FLAGS] = { .align = 2, .size = 2, },
39 [IEEE80211_RADIOTAP_TX_FLAGS] = { .align = 2, .size = 2, },
40 [IEEE80211_RADIOTAP_RTS_RETRIES] = { .align = 1, .size = 1, },
41 [IEEE80211_RADIOTAP_DATA_RETRIES] = { .align = 1, .size = 1, },
42 /*
43 * add more here as they are defined in radiotap.h
44 */
45};
46
47static const struct ieee80211_radiotap_namespace radiotap_ns = {
48 .n_bits = sizeof(rtap_namespace_sizes) / sizeof(rtap_namespace_sizes[0]),
49 .align_size = rtap_namespace_sizes,
50};
51
13/** 52/**
14 * ieee80211_radiotap_iterator_init - radiotap parser iterator initialization 53 * ieee80211_radiotap_iterator_init - radiotap parser iterator initialization
15 * @iterator: radiotap_iterator to initialize 54 * @iterator: radiotap_iterator to initialize
@@ -50,9 +89,9 @@
50 */ 89 */
51 90
52int ieee80211_radiotap_iterator_init( 91int ieee80211_radiotap_iterator_init(
53 struct ieee80211_radiotap_iterator *iterator, 92 struct ieee80211_radiotap_iterator *iterator,
54 struct ieee80211_radiotap_header *radiotap_header, 93 struct ieee80211_radiotap_header *radiotap_header,
55 int max_length) 94 int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns)
56{ 95{
57 /* Linux only supports version 0 radiotap format */ 96 /* Linux only supports version 0 radiotap format */
58 if (radiotap_header->it_version) 97 if (radiotap_header->it_version)
@@ -62,19 +101,24 @@ int ieee80211_radiotap_iterator_init(
62 if (max_length < get_unaligned_le16(&radiotap_header->it_len)) 101 if (max_length < get_unaligned_le16(&radiotap_header->it_len))
63 return -EINVAL; 102 return -EINVAL;
64 103
65 iterator->rtheader = radiotap_header; 104 iterator->_rtheader = radiotap_header;
66 iterator->max_length = get_unaligned_le16(&radiotap_header->it_len); 105 iterator->_max_length = get_unaligned_le16(&radiotap_header->it_len);
67 iterator->arg_index = 0; 106 iterator->_arg_index = 0;
68 iterator->bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present); 107 iterator->_bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present);
69 iterator->arg = (u8 *)radiotap_header + sizeof(*radiotap_header); 108 iterator->_arg = (uint8_t *)radiotap_header + sizeof(*radiotap_header);
70 iterator->this_arg = NULL; 109 iterator->_reset_on_ext = 0;
110 iterator->_next_bitmap = &radiotap_header->it_present;
111 iterator->_next_bitmap++;
112 iterator->_vns = vns;
113 iterator->current_namespace = &radiotap_ns;
114 iterator->is_radiotap_ns = 1;
71 115
72 /* find payload start allowing for extended bitmap(s) */ 116 /* find payload start allowing for extended bitmap(s) */
73 117
74 if (unlikely(iterator->bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT))) { 118 if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) {
75 while (get_unaligned_le32(iterator->arg) & 119 while (get_unaligned_le32(iterator->_arg) &
76 (1 << IEEE80211_RADIOTAP_EXT)) { 120 (1 << IEEE80211_RADIOTAP_EXT)) {
77 iterator->arg += sizeof(u32); 121 iterator->_arg += sizeof(uint32_t);
78 122
79 /* 123 /*
80 * check for insanity where the present bitmaps 124 * check for insanity where the present bitmaps
@@ -82,12 +126,13 @@ int ieee80211_radiotap_iterator_init(
82 * stated radiotap header length 126 * stated radiotap header length
83 */ 127 */
84 128
85 if (((ulong)iterator->arg - 129 if ((unsigned long)iterator->_arg -
86 (ulong)iterator->rtheader) > iterator->max_length) 130 (unsigned long)iterator->_rtheader >
131 (unsigned long)iterator->_max_length)
87 return -EINVAL; 132 return -EINVAL;
88 } 133 }
89 134
90 iterator->arg += sizeof(u32); 135 iterator->_arg += sizeof(uint32_t);
91 136
92 /* 137 /*
93 * no need to check again for blowing past stated radiotap 138 * no need to check again for blowing past stated radiotap
@@ -96,12 +141,36 @@ int ieee80211_radiotap_iterator_init(
96 */ 141 */
97 } 142 }
98 143
144 iterator->this_arg = iterator->_arg;
145
99 /* we are all initialized happily */ 146 /* we are all initialized happily */
100 147
101 return 0; 148 return 0;
102} 149}
103EXPORT_SYMBOL(ieee80211_radiotap_iterator_init); 150EXPORT_SYMBOL(ieee80211_radiotap_iterator_init);
104 151
152static void find_ns(struct ieee80211_radiotap_iterator *iterator,
153 uint32_t oui, uint8_t subns)
154{
155 int i;
156
157 iterator->current_namespace = NULL;
158
159 if (!iterator->_vns)
160 return;
161
162 for (i = 0; i < iterator->_vns->n_ns; i++) {
163 if (iterator->_vns->ns[i].oui != oui)
164 continue;
165 if (iterator->_vns->ns[i].subns != subns)
166 continue;
167
168 iterator->current_namespace = &iterator->_vns->ns[i];
169 break;
170 }
171}
172
173
105 174
106/** 175/**
107 * ieee80211_radiotap_iterator_next - return next radiotap parser iterator arg 176 * ieee80211_radiotap_iterator_next - return next radiotap parser iterator arg
@@ -127,99 +196,80 @@ EXPORT_SYMBOL(ieee80211_radiotap_iterator_init);
127 */ 196 */
128 197
129int ieee80211_radiotap_iterator_next( 198int ieee80211_radiotap_iterator_next(
130 struct ieee80211_radiotap_iterator *iterator) 199 struct ieee80211_radiotap_iterator *iterator)
131{ 200{
132 201 while (1) {
133 /*
134 * small length lookup table for all radiotap types we heard of
135 * starting from b0 in the bitmap, so we can walk the payload
136 * area of the radiotap header
137 *
138 * There is a requirement to pad args, so that args
139 * of a given length must begin at a boundary of that length
140 * -- but note that compound args are allowed (eg, 2 x u16
141 * for IEEE80211_RADIOTAP_CHANNEL) so total arg length is not
142 * a reliable indicator of alignment requirement.
143 *
144 * upper nybble: content alignment for arg
145 * lower nybble: content length for arg
146 */
147
148 static const u8 rt_sizes[] = {
149 [IEEE80211_RADIOTAP_TSFT] = 0x88,
150 [IEEE80211_RADIOTAP_FLAGS] = 0x11,
151 [IEEE80211_RADIOTAP_RATE] = 0x11,
152 [IEEE80211_RADIOTAP_CHANNEL] = 0x24,
153 [IEEE80211_RADIOTAP_FHSS] = 0x22,
154 [IEEE80211_RADIOTAP_DBM_ANTSIGNAL] = 0x11,
155 [IEEE80211_RADIOTAP_DBM_ANTNOISE] = 0x11,
156 [IEEE80211_RADIOTAP_LOCK_QUALITY] = 0x22,
157 [IEEE80211_RADIOTAP_TX_ATTENUATION] = 0x22,
158 [IEEE80211_RADIOTAP_DB_TX_ATTENUATION] = 0x22,
159 [IEEE80211_RADIOTAP_DBM_TX_POWER] = 0x11,
160 [IEEE80211_RADIOTAP_ANTENNA] = 0x11,
161 [IEEE80211_RADIOTAP_DB_ANTSIGNAL] = 0x11,
162 [IEEE80211_RADIOTAP_DB_ANTNOISE] = 0x11,
163 [IEEE80211_RADIOTAP_RX_FLAGS] = 0x22,
164 [IEEE80211_RADIOTAP_TX_FLAGS] = 0x22,
165 [IEEE80211_RADIOTAP_RTS_RETRIES] = 0x11,
166 [IEEE80211_RADIOTAP_DATA_RETRIES] = 0x11,
167 /*
168 * add more here as they are defined in
169 * include/net/ieee80211_radiotap.h
170 */
171 };
172
173 /*
174 * for every radiotap entry we can at
175 * least skip (by knowing the length)...
176 */
177
178 while (iterator->arg_index < sizeof(rt_sizes)) {
179 int hit = 0; 202 int hit = 0;
180 int pad; 203 int pad, align, size, subns, vnslen;
204 uint32_t oui;
181 205
182 if (!(iterator->bitmap_shifter & 1)) 206 /* if no more EXT bits, that's it */
207 if ((iterator->_arg_index % 32) == IEEE80211_RADIOTAP_EXT &&
208 !(iterator->_bitmap_shifter & 1))
209 return -ENOENT;
210
211 if (!(iterator->_bitmap_shifter & 1))
183 goto next_entry; /* arg not present */ 212 goto next_entry; /* arg not present */
184 213
214 /* get alignment/size of data */
215 switch (iterator->_arg_index % 32) {
216 case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE:
217 case IEEE80211_RADIOTAP_EXT:
218 align = 1;
219 size = 0;
220 break;
221 case IEEE80211_RADIOTAP_VENDOR_NAMESPACE:
222 align = 2;
223 size = 6;
224 break;
225 default:
226 if (!iterator->current_namespace ||
227 iterator->_arg_index >= iterator->current_namespace->n_bits) {
228 if (iterator->current_namespace == &radiotap_ns)
229 return -ENOENT;
230 align = 0;
231 } else {
232 align = iterator->current_namespace->align_size[iterator->_arg_index].align;
233 size = iterator->current_namespace->align_size[iterator->_arg_index].size;
234 }
235 if (!align) {
236 /* skip all subsequent data */
237 iterator->_arg = iterator->_next_ns_data;
238 /* give up on this namespace */
239 iterator->current_namespace = NULL;
240 goto next_entry;
241 }
242 break;
243 }
244
185 /* 245 /*
186 * arg is present, account for alignment padding 246 * arg is present, account for alignment padding
187 * 8-bit args can be at any alignment
188 * 16-bit args must start on 16-bit boundary
189 * 32-bit args must start on 32-bit boundary
190 * 64-bit args must start on 64-bit boundary
191 * 247 *
192 * note that total arg size can differ from alignment of 248 * Note that these alignments are relative to the start
193 * elements inside arg, so we use upper nybble of length 249 * of the radiotap header. There is no guarantee
194 * table to base alignment on
195 *
196 * also note: these alignments are ** relative to the
197 * start of the radiotap header **. There is no guarantee
198 * that the radiotap header itself is aligned on any 250 * that the radiotap header itself is aligned on any
199 * kind of boundary. 251 * kind of boundary.
200 * 252 *
201 * the above is why get_unaligned() is used to dereference 253 * The above is why get_unaligned() is used to dereference
202 * multibyte elements from the radiotap area 254 * multibyte elements from the radiotap area.
203 */ 255 */
204 256
205 pad = (((ulong)iterator->arg) - 257 pad = ((unsigned long)iterator->_arg -
206 ((ulong)iterator->rtheader)) & 258 (unsigned long)iterator->_rtheader) & (align - 1);
207 ((rt_sizes[iterator->arg_index] >> 4) - 1);
208 259
209 if (pad) 260 if (pad)
210 iterator->arg += 261 iterator->_arg += align - pad;
211 (rt_sizes[iterator->arg_index] >> 4) - pad;
212 262
213 /* 263 /*
214 * this is what we will return to user, but we need to 264 * this is what we will return to user, but we need to
215 * move on first so next call has something fresh to test 265 * move on first so next call has something fresh to test
216 */ 266 */
217 iterator->this_arg_index = iterator->arg_index; 267 iterator->this_arg_index = iterator->_arg_index;
218 iterator->this_arg = iterator->arg; 268 iterator->this_arg = iterator->_arg;
219 hit = 1; 269 iterator->this_arg_size = size;
220 270
221 /* internally move on the size of this arg */ 271 /* internally move on the size of this arg */
222 iterator->arg += rt_sizes[iterator->arg_index] & 0x0f; 272 iterator->_arg += size;
223 273
224 /* 274 /*
225 * check for insanity where we are given a bitmap that 275 * check for insanity where we are given a bitmap that
@@ -228,32 +278,73 @@ int ieee80211_radiotap_iterator_next(
228 * max_length on the last arg, never exceeding it. 278 * max_length on the last arg, never exceeding it.
229 */ 279 */
230 280
231 if (((ulong)iterator->arg - (ulong)iterator->rtheader) > 281 if ((unsigned long)iterator->_arg -
232 iterator->max_length) 282 (unsigned long)iterator->_rtheader >
283 (unsigned long)iterator->_max_length)
233 return -EINVAL; 284 return -EINVAL;
234 285
235 next_entry: 286 /* these special ones are valid in each bitmap word */
236 iterator->arg_index++; 287 switch (iterator->_arg_index % 32) {
237 if (unlikely((iterator->arg_index & 31) == 0)) { 288 case IEEE80211_RADIOTAP_VENDOR_NAMESPACE:
238 /* completed current u32 bitmap */ 289 iterator->_bitmap_shifter >>= 1;
239 if (iterator->bitmap_shifter & 1) { 290 iterator->_arg_index++;
240 /* b31 was set, there is more */ 291
241 /* move to next u32 bitmap */ 292 iterator->_reset_on_ext = 1;
242 iterator->bitmap_shifter = 293
243 get_unaligned_le32(iterator->next_bitmap); 294 vnslen = get_unaligned_le16(iterator->this_arg + 4);
244 iterator->next_bitmap++; 295 iterator->_next_ns_data = iterator->_arg + vnslen;
245 } else 296 oui = (*iterator->this_arg << 16) |
246 /* no more bitmaps: end */ 297 (*(iterator->this_arg + 1) << 8) |
247 iterator->arg_index = sizeof(rt_sizes); 298 *(iterator->this_arg + 2);
248 } else /* just try the next bit */ 299 subns = *(iterator->this_arg + 3);
249 iterator->bitmap_shifter >>= 1; 300
301 find_ns(iterator, oui, subns);
302
303 iterator->is_radiotap_ns = 0;
304 /* allow parsers to show this information */
305 iterator->this_arg_index =
306 IEEE80211_RADIOTAP_VENDOR_NAMESPACE;
307 iterator->this_arg_size += vnslen;
308 if ((unsigned long)iterator->this_arg +
309 iterator->this_arg_size -
310 (unsigned long)iterator->_rtheader >
311 (unsigned long)(unsigned long)iterator->_max_length)
312 return -EINVAL;
313 hit = 1;
314 break;
315 case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE:
316 iterator->_bitmap_shifter >>= 1;
317 iterator->_arg_index++;
318
319 iterator->_reset_on_ext = 1;
320 iterator->current_namespace = &radiotap_ns;
321 iterator->is_radiotap_ns = 1;
322 break;
323 case IEEE80211_RADIOTAP_EXT:
324 /*
325 * bit 31 was set, there is more
326 * -- move to next u32 bitmap
327 */
328 iterator->_bitmap_shifter =
329 get_unaligned_le32(iterator->_next_bitmap);
330 iterator->_next_bitmap++;
331 if (iterator->_reset_on_ext)
332 iterator->_arg_index = 0;
333 else
334 iterator->_arg_index++;
335 iterator->_reset_on_ext = 0;
336 break;
337 default:
338 /* we've got a hit! */
339 hit = 1;
340 next_entry:
341 iterator->_bitmap_shifter >>= 1;
342 iterator->_arg_index++;
343 }
250 344
251 /* if we found a valid arg earlier, return it now */ 345 /* if we found a valid arg earlier, return it now */
252 if (hit) 346 if (hit)
253 return 0; 347 return 0;
254 } 348 }
255
256 /* we don't know how to handle any more args, we're done */
257 return -ENOENT;
258} 349}
259EXPORT_SYMBOL(ieee80211_radiotap_iterator_next); 350EXPORT_SYMBOL(ieee80211_radiotap_iterator_next);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 7a0754c92df..ed89c59bb43 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -40,8 +40,18 @@
40#include <net/cfg80211.h> 40#include <net/cfg80211.h>
41#include "core.h" 41#include "core.h"
42#include "reg.h" 42#include "reg.h"
43#include "regdb.h"
43#include "nl80211.h" 44#include "nl80211.h"
44 45
46#ifdef CONFIG_CFG80211_REG_DEBUG
47#define REG_DBG_PRINT(format, args...) \
48 do { \
49 printk(KERN_DEBUG format , ## args); \
50 } while (0)
51#else
52#define REG_DBG_PRINT(args...)
53#endif
54
45/* Receipt of information from last regulatory request */ 55/* Receipt of information from last regulatory request */
46static struct regulatory_request *last_request; 56static struct regulatory_request *last_request;
47 57
@@ -124,82 +134,11 @@ static const struct ieee80211_regdomain *cfg80211_world_regdom =
124 &world_regdom; 134 &world_regdom;
125 135
126static char *ieee80211_regdom = "00"; 136static char *ieee80211_regdom = "00";
137static char user_alpha2[2];
127 138
128module_param(ieee80211_regdom, charp, 0444); 139module_param(ieee80211_regdom, charp, 0444);
129MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); 140MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
130 141
131#ifdef CONFIG_WIRELESS_OLD_REGULATORY
132/*
133 * We assume 40 MHz bandwidth for the old regulatory work.
134 * We make emphasis we are using the exact same frequencies
135 * as before
136 */
137
138static const struct ieee80211_regdomain us_regdom = {
139 .n_reg_rules = 6,
140 .alpha2 = "US",
141 .reg_rules = {
142 /* IEEE 802.11b/g, channels 1..11 */
143 REG_RULE(2412-10, 2462+10, 40, 6, 27, 0),
144 /* IEEE 802.11a, channel 36..48 */
145 REG_RULE(5180-10, 5240+10, 40, 6, 17, 0),
146 /* IEEE 802.11a, channels 48..64 */
147 REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS),
148 /* IEEE 802.11a, channels 100..124 */
149 REG_RULE(5500-10, 5590+10, 40, 6, 20, NL80211_RRF_DFS),
150 /* IEEE 802.11a, channels 132..144 */
151 REG_RULE(5660-10, 5700+10, 40, 6, 20, NL80211_RRF_DFS),
152 /* IEEE 802.11a, channels 149..165, outdoor */
153 REG_RULE(5745-10, 5825+10, 40, 6, 30, 0),
154 }
155};
156
157static const struct ieee80211_regdomain jp_regdom = {
158 .n_reg_rules = 6,
159 .alpha2 = "JP",
160 .reg_rules = {
161 /* IEEE 802.11b/g, channels 1..11 */
162 REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
163 /* IEEE 802.11b/g, channels 12..13 */
164 REG_RULE(2467-10, 2472+10, 20, 6, 20, 0),
165 /* IEEE 802.11b/g, channel 14 */
166 REG_RULE(2484-10, 2484+10, 20, 6, 20, NL80211_RRF_NO_OFDM),
167 /* IEEE 802.11a, channels 36..48 */
168 REG_RULE(5180-10, 5240+10, 40, 6, 20, 0),
169 /* IEEE 802.11a, channels 52..64 */
170 REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS),
171 /* IEEE 802.11a, channels 100..144 */
172 REG_RULE(5500-10, 5700+10, 40, 6, 23, NL80211_RRF_DFS),
173 }
174};
175
176static const struct ieee80211_regdomain *static_regdom(char *alpha2)
177{
178 if (alpha2[0] == 'U' && alpha2[1] == 'S')
179 return &us_regdom;
180 if (alpha2[0] == 'J' && alpha2[1] == 'P')
181 return &jp_regdom;
182 /* Use world roaming rules for "EU", since it was a pseudo
183 domain anyway... */
184 if (alpha2[0] == 'E' && alpha2[1] == 'U')
185 return &world_regdom;
186 /* Default, world roaming rules */
187 return &world_regdom;
188}
189
190static bool is_old_static_regdom(const struct ieee80211_regdomain *rd)
191{
192 if (rd == &us_regdom || rd == &jp_regdom || rd == &world_regdom)
193 return true;
194 return false;
195}
196#else
197static inline bool is_old_static_regdom(const struct ieee80211_regdomain *rd)
198{
199 return false;
200}
201#endif
202
203static void reset_regdomains(void) 142static void reset_regdomains(void)
204{ 143{
205 /* avoid freeing static information or freeing something twice */ 144 /* avoid freeing static information or freeing something twice */
@@ -209,8 +148,6 @@ static void reset_regdomains(void)
209 cfg80211_world_regdom = NULL; 148 cfg80211_world_regdom = NULL;
210 if (cfg80211_regdomain == &world_regdom) 149 if (cfg80211_regdomain == &world_regdom)
211 cfg80211_regdomain = NULL; 150 cfg80211_regdomain = NULL;
212 if (is_old_static_regdom(cfg80211_regdomain))
213 cfg80211_regdomain = NULL;
214 151
215 kfree(cfg80211_regdomain); 152 kfree(cfg80211_regdomain);
216 kfree(cfg80211_world_regdom); 153 kfree(cfg80211_world_regdom);
@@ -316,6 +253,27 @@ static bool regdom_changes(const char *alpha2)
316 return true; 253 return true;
317} 254}
318 255
256/*
257 * The NL80211_REGDOM_SET_BY_USER regdom alpha2 is cached, this lets
258 * you know if a valid regulatory hint with NL80211_REGDOM_SET_BY_USER
259 * has ever been issued.
260 */
261static bool is_user_regdom_saved(void)
262{
263 if (user_alpha2[0] == '9' && user_alpha2[1] == '7')
264 return false;
265
266 /* This would indicate a mistake on the design */
267 if (WARN((!is_world_regdom(user_alpha2) &&
268 !is_an_alpha2(user_alpha2)),
269 "Unexpected user alpha2: %c%c\n",
270 user_alpha2[0],
271 user_alpha2[1]))
272 return false;
273
274 return true;
275}
276
319/** 277/**
320 * country_ie_integrity_changes - tells us if the country IE has changed 278 * country_ie_integrity_changes - tells us if the country IE has changed
321 * @checksum: checksum of country IE of fields we are interested in 279 * @checksum: checksum of country IE of fields we are interested in
@@ -335,6 +293,98 @@ static bool country_ie_integrity_changes(u32 checksum)
335 return false; 293 return false;
336} 294}
337 295
296static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd,
297 const struct ieee80211_regdomain *src_regd)
298{
299 struct ieee80211_regdomain *regd;
300 int size_of_regd = 0;
301 unsigned int i;
302
303 size_of_regd = sizeof(struct ieee80211_regdomain) +
304 ((src_regd->n_reg_rules + 1) * sizeof(struct ieee80211_reg_rule));
305
306 regd = kzalloc(size_of_regd, GFP_KERNEL);
307 if (!regd)
308 return -ENOMEM;
309
310 memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
311
312 for (i = 0; i < src_regd->n_reg_rules; i++)
313 memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
314 sizeof(struct ieee80211_reg_rule));
315
316 *dst_regd = regd;
317 return 0;
318}
319
320#ifdef CONFIG_CFG80211_INTERNAL_REGDB
321struct reg_regdb_search_request {
322 char alpha2[2];
323 struct list_head list;
324};
325
326static LIST_HEAD(reg_regdb_search_list);
327static DEFINE_SPINLOCK(reg_regdb_search_lock);
328
329static void reg_regdb_search(struct work_struct *work)
330{
331 struct reg_regdb_search_request *request;
332 const struct ieee80211_regdomain *curdom, *regdom;
333 int i, r;
334
335 spin_lock(&reg_regdb_search_lock);
336 while (!list_empty(&reg_regdb_search_list)) {
337 request = list_first_entry(&reg_regdb_search_list,
338 struct reg_regdb_search_request,
339 list);
340 list_del(&request->list);
341
342 for (i=0; i<reg_regdb_size; i++) {
343 curdom = reg_regdb[i];
344
345 if (!memcmp(request->alpha2, curdom->alpha2, 2)) {
346 r = reg_copy_regd(&regdom, curdom);
347 if (r)
348 break;
349 spin_unlock(&reg_regdb_search_lock);
350 mutex_lock(&cfg80211_mutex);
351 set_regdom(regdom);
352 mutex_unlock(&cfg80211_mutex);
353 spin_lock(&reg_regdb_search_lock);
354 break;
355 }
356 }
357
358 kfree(request);
359 }
360 spin_unlock(&reg_regdb_search_lock);
361}
362
363static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
364
365static void reg_regdb_query(const char *alpha2)
366{
367 struct reg_regdb_search_request *request;
368
369 if (!alpha2)
370 return;
371
372 request = kzalloc(sizeof(struct reg_regdb_search_request), GFP_KERNEL);
373 if (!request)
374 return;
375
376 memcpy(request->alpha2, alpha2, 2);
377
378 spin_lock(&reg_regdb_search_lock);
379 list_add_tail(&request->list, &reg_regdb_search_list);
380 spin_unlock(&reg_regdb_search_lock);
381
382 schedule_work(&reg_regdb_work);
383}
384#else
385static inline void reg_regdb_query(const char *alpha2) {}
386#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
387
338/* 388/*
339 * This lets us keep regulatory code which is updated on a regulatory 389 * This lets us keep regulatory code which is updated on a regulatory
340 * basis in userspace. 390 * basis in userspace.
@@ -354,6 +404,9 @@ static int call_crda(const char *alpha2)
354 printk(KERN_INFO "cfg80211: Calling CRDA to update world " 404 printk(KERN_INFO "cfg80211: Calling CRDA to update world "
355 "regulatory domain\n"); 405 "regulatory domain\n");
356 406
407 /* query internal regulatory database (if it exists) */
408 reg_regdb_query(alpha2);
409
357 country_env[8] = alpha2[0]; 410 country_env[8] = alpha2[0];
358 country_env[9] = alpha2[1]; 411 country_env[9] = alpha2[1];
359 412
@@ -454,12 +507,212 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
454} 507}
455 508
456/* 509/*
510 * This is a work around for sanity checking ieee80211_channel_to_frequency()'s
511 * work. ieee80211_channel_to_frequency() can for example currently provide a
512 * 2 GHz channel when in fact a 5 GHz channel was desired. An example would be
513 * an AP providing channel 8 on a country IE triplet when it sent this on the
514 * 5 GHz band, that channel is designed to be channel 8 on 5 GHz, not a 2 GHz
515 * channel.
516 *
517 * This can be removed once ieee80211_channel_to_frequency() takes in a band.
518 */
519static bool chan_in_band(int chan, enum ieee80211_band band)
520{
521 int center_freq = ieee80211_channel_to_frequency(chan);
522
523 switch (band) {
524 case IEEE80211_BAND_2GHZ:
525 if (center_freq <= 2484)
526 return true;
527 return false;
528 case IEEE80211_BAND_5GHZ:
529 if (center_freq >= 5005)
530 return true;
531 return false;
532 default:
533 return false;
534 }
535}
536
537/*
538 * Some APs may send a country IE triplet for each channel they
539 * support and while this is completely overkill and silly we still
540 * need to support it. We avoid making a single rule for each channel
541 * though and to help us with this we use this helper to find the
542 * actual subband end channel. These type of country IE triplet
543 * scenerios are handled then, all yielding two regulaotry rules from
544 * parsing a country IE:
545 *
546 * [1]
547 * [2]
548 * [36]
549 * [40]
550 *
551 * [1]
552 * [2-4]
553 * [5-12]
554 * [36]
555 * [40-44]
556 *
557 * [1-4]
558 * [5-7]
559 * [36-44]
560 * [48-64]
561 *
562 * [36-36]
563 * [40-40]
564 * [44-44]
565 * [48-48]
566 * [52-52]
567 * [56-56]
568 * [60-60]
569 * [64-64]
570 * [100-100]
571 * [104-104]
572 * [108-108]
573 * [112-112]
574 * [116-116]
575 * [120-120]
576 * [124-124]
577 * [128-128]
578 * [132-132]
579 * [136-136]
580 * [140-140]
581 *
582 * Returns 0 if the IE has been found to be invalid in the middle
583 * somewhere.
584 */
585static int max_subband_chan(enum ieee80211_band band,
586 int orig_cur_chan,
587 int orig_end_channel,
588 s8 orig_max_power,
589 u8 **country_ie,
590 u8 *country_ie_len)
591{
592 u8 *triplets_start = *country_ie;
593 u8 len_at_triplet = *country_ie_len;
594 int end_subband_chan = orig_end_channel;
595
596 /*
597 * We'll deal with padding for the caller unless
598 * its not immediate and we don't process any channels
599 */
600 if (*country_ie_len == 1) {
601 *country_ie += 1;
602 *country_ie_len -= 1;
603 return orig_end_channel;
604 }
605
606 /* Move to the next triplet and then start search */
607 *country_ie += 3;
608 *country_ie_len -= 3;
609
610 if (!chan_in_band(orig_cur_chan, band))
611 return 0;
612
613 while (*country_ie_len >= 3) {
614 int end_channel = 0;
615 struct ieee80211_country_ie_triplet *triplet =
616 (struct ieee80211_country_ie_triplet *) *country_ie;
617 int cur_channel = 0, next_expected_chan;
618
619 /* means last triplet is completely unrelated to this one */
620 if (triplet->ext.reg_extension_id >=
621 IEEE80211_COUNTRY_EXTENSION_ID) {
622 *country_ie -= 3;
623 *country_ie_len += 3;
624 break;
625 }
626
627 if (triplet->chans.first_channel == 0) {
628 *country_ie += 1;
629 *country_ie_len -= 1;
630 if (*country_ie_len != 0)
631 return 0;
632 break;
633 }
634
635 if (triplet->chans.num_channels == 0)
636 return 0;
637
638 /* Monitonically increasing channel order */
639 if (triplet->chans.first_channel <= end_subband_chan)
640 return 0;
641
642 if (!chan_in_band(triplet->chans.first_channel, band))
643 return 0;
644
645 /* 2 GHz */
646 if (triplet->chans.first_channel <= 14) {
647 end_channel = triplet->chans.first_channel +
648 triplet->chans.num_channels - 1;
649 }
650 else {
651 end_channel = triplet->chans.first_channel +
652 (4 * (triplet->chans.num_channels - 1));
653 }
654
655 if (!chan_in_band(end_channel, band))
656 return 0;
657
658 if (orig_max_power != triplet->chans.max_power) {
659 *country_ie -= 3;
660 *country_ie_len += 3;
661 break;
662 }
663
664 cur_channel = triplet->chans.first_channel;
665
666 /* The key is finding the right next expected channel */
667 if (band == IEEE80211_BAND_2GHZ)
668 next_expected_chan = end_subband_chan + 1;
669 else
670 next_expected_chan = end_subband_chan + 4;
671
672 if (cur_channel != next_expected_chan) {
673 *country_ie -= 3;
674 *country_ie_len += 3;
675 break;
676 }
677
678 end_subband_chan = end_channel;
679
680 /* Move to the next one */
681 *country_ie += 3;
682 *country_ie_len -= 3;
683
684 /*
685 * Padding needs to be dealt with if we processed
686 * some channels.
687 */
688 if (*country_ie_len == 1) {
689 *country_ie += 1;
690 *country_ie_len -= 1;
691 break;
692 }
693
694 /* If seen, the IE is invalid */
695 if (*country_ie_len == 2)
696 return 0;
697 }
698
699 if (end_subband_chan == orig_end_channel) {
700 *country_ie = triplets_start;
701 *country_ie_len = len_at_triplet;
702 return orig_end_channel;
703 }
704
705 return end_subband_chan;
706}
707
708/*
457 * Converts a country IE to a regulatory domain. A regulatory domain 709 * Converts a country IE to a regulatory domain. A regulatory domain
458 * structure has a lot of information which the IE doesn't yet have, 710 * structure has a lot of information which the IE doesn't yet have,
459 * so for the other values we use upper max values as we will intersect 711 * so for the other values we use upper max values as we will intersect
460 * with our userspace regulatory agent to get lower bounds. 712 * with our userspace regulatory agent to get lower bounds.
461 */ 713 */
462static struct ieee80211_regdomain *country_ie_2_rd( 714static struct ieee80211_regdomain *country_ie_2_rd(
715 enum ieee80211_band band,
463 u8 *country_ie, 716 u8 *country_ie,
464 u8 country_ie_len, 717 u8 country_ie_len,
465 u32 *checksum) 718 u32 *checksum)
@@ -521,10 +774,29 @@ static struct ieee80211_regdomain *country_ie_2_rd(
521 continue; 774 continue;
522 } 775 }
523 776
777 /*
778 * APs can add padding to make length divisible
779 * by two, required by the spec.
780 */
781 if (triplet->chans.first_channel == 0) {
782 country_ie++;
783 country_ie_len--;
784 /* This is expected to be at the very end only */
785 if (country_ie_len != 0)
786 return NULL;
787 break;
788 }
789
790 if (triplet->chans.num_channels == 0)
791 return NULL;
792
793 if (!chan_in_band(triplet->chans.first_channel, band))
794 return NULL;
795
524 /* 2 GHz */ 796 /* 2 GHz */
525 if (triplet->chans.first_channel <= 14) 797 if (band == IEEE80211_BAND_2GHZ)
526 end_channel = triplet->chans.first_channel + 798 end_channel = triplet->chans.first_channel +
527 triplet->chans.num_channels; 799 triplet->chans.num_channels - 1;
528 else 800 else
529 /* 801 /*
530 * 5 GHz -- For example in country IEs if the first 802 * 5 GHz -- For example in country IEs if the first
@@ -539,6 +811,24 @@ static struct ieee80211_regdomain *country_ie_2_rd(
539 (4 * (triplet->chans.num_channels - 1)); 811 (4 * (triplet->chans.num_channels - 1));
540 812
541 cur_channel = triplet->chans.first_channel; 813 cur_channel = triplet->chans.first_channel;
814
815 /*
816 * Enhancement for APs that send a triplet for every channel
817 * or for whatever reason sends triplets with multiple channels
818 * separated when in fact they should be together.
819 */
820 end_channel = max_subband_chan(band,
821 cur_channel,
822 end_channel,
823 triplet->chans.max_power,
824 &country_ie,
825 &country_ie_len);
826 if (!end_channel)
827 return NULL;
828
829 if (!chan_in_band(end_channel, band))
830 return NULL;
831
542 cur_sub_max_channel = end_channel; 832 cur_sub_max_channel = end_channel;
543 833
544 /* Basic sanity check */ 834 /* Basic sanity check */
@@ -569,10 +859,13 @@ static struct ieee80211_regdomain *country_ie_2_rd(
569 859
570 last_sub_max_channel = cur_sub_max_channel; 860 last_sub_max_channel = cur_sub_max_channel;
571 861
572 country_ie += 3;
573 country_ie_len -= 3;
574 num_rules++; 862 num_rules++;
575 863
864 if (country_ie_len >= 3) {
865 country_ie += 3;
866 country_ie_len -= 3;
867 }
868
576 /* 869 /*
577 * Note: this is not a IEEE requirement but 870 * Note: this is not a IEEE requirement but
578 * simply a memory requirement 871 * simply a memory requirement
@@ -615,6 +908,12 @@ static struct ieee80211_regdomain *country_ie_2_rd(
615 continue; 908 continue;
616 } 909 }
617 910
911 if (triplet->chans.first_channel == 0) {
912 country_ie++;
913 country_ie_len--;
914 break;
915 }
916
618 reg_rule = &rd->reg_rules[i]; 917 reg_rule = &rd->reg_rules[i];
619 freq_range = &reg_rule->freq_range; 918 freq_range = &reg_rule->freq_range;
620 power_rule = &reg_rule->power_rule; 919 power_rule = &reg_rule->power_rule;
@@ -622,13 +921,20 @@ static struct ieee80211_regdomain *country_ie_2_rd(
622 reg_rule->flags = flags; 921 reg_rule->flags = flags;
623 922
624 /* 2 GHz */ 923 /* 2 GHz */
625 if (triplet->chans.first_channel <= 14) 924 if (band == IEEE80211_BAND_2GHZ)
626 end_channel = triplet->chans.first_channel + 925 end_channel = triplet->chans.first_channel +
627 triplet->chans.num_channels; 926 triplet->chans.num_channels -1;
628 else 927 else
629 end_channel = triplet->chans.first_channel + 928 end_channel = triplet->chans.first_channel +
630 (4 * (triplet->chans.num_channels - 1)); 929 (4 * (triplet->chans.num_channels - 1));
631 930
931 end_channel = max_subband_chan(band,
932 triplet->chans.first_channel,
933 end_channel,
934 triplet->chans.max_power,
935 &country_ie,
936 &country_ie_len);
937
632 /* 938 /*
633 * The +10 is since the regulatory domain expects 939 * The +10 is since the regulatory domain expects
634 * the actual band edge, not the center of freq for 940 * the actual band edge, not the center of freq for
@@ -649,12 +955,15 @@ static struct ieee80211_regdomain *country_ie_2_rd(
649 */ 955 */
650 freq_range->max_bandwidth_khz = MHZ_TO_KHZ(40); 956 freq_range->max_bandwidth_khz = MHZ_TO_KHZ(40);
651 power_rule->max_antenna_gain = DBI_TO_MBI(100); 957 power_rule->max_antenna_gain = DBI_TO_MBI(100);
652 power_rule->max_eirp = DBM_TO_MBM(100); 958 power_rule->max_eirp = DBM_TO_MBM(triplet->chans.max_power);
653 959
654 country_ie += 3;
655 country_ie_len -= 3;
656 i++; 960 i++;
657 961
962 if (country_ie_len >= 3) {
963 country_ie += 3;
964 country_ie_len -= 3;
965 }
966
658 BUG_ON(i > NL80211_MAX_SUPP_REG_RULES); 967 BUG_ON(i > NL80211_MAX_SUPP_REG_RULES);
659 } 968 }
660 969
@@ -950,25 +1259,21 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
950 if (r == -ERANGE && 1259 if (r == -ERANGE &&
951 last_request->initiator == 1260 last_request->initiator ==
952 NL80211_REGDOM_SET_BY_COUNTRY_IE) { 1261 NL80211_REGDOM_SET_BY_COUNTRY_IE) {
953#ifdef CONFIG_CFG80211_REG_DEBUG 1262 REG_DBG_PRINT("cfg80211: Leaving channel %d MHz "
954 printk(KERN_DEBUG "cfg80211: Leaving channel %d MHz "
955 "intact on %s - no rule found in band on " 1263 "intact on %s - no rule found in band on "
956 "Country IE\n", 1264 "Country IE\n",
957 chan->center_freq, wiphy_name(wiphy)); 1265 chan->center_freq, wiphy_name(wiphy));
958#endif
959 } else { 1266 } else {
960 /* 1267 /*
961 * In this case we know the country IE has at least one reg rule 1268 * In this case we know the country IE has at least one reg rule
962 * for the band so we respect its band definitions 1269 * for the band so we respect its band definitions
963 */ 1270 */
964#ifdef CONFIG_CFG80211_REG_DEBUG
965 if (last_request->initiator == 1271 if (last_request->initiator ==
966 NL80211_REGDOM_SET_BY_COUNTRY_IE) 1272 NL80211_REGDOM_SET_BY_COUNTRY_IE)
967 printk(KERN_DEBUG "cfg80211: Disabling " 1273 REG_DBG_PRINT("cfg80211: Disabling "
968 "channel %d MHz on %s due to " 1274 "channel %d MHz on %s due to "
969 "Country IE\n", 1275 "Country IE\n",
970 chan->center_freq, wiphy_name(wiphy)); 1276 chan->center_freq, wiphy_name(wiphy));
971#endif
972 flags |= IEEE80211_CHAN_DISABLED; 1277 flags |= IEEE80211_CHAN_DISABLED;
973 chan->flags = flags; 1278 chan->flags = flags;
974 } 1279 }
@@ -1342,30 +1647,6 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
1342} 1647}
1343EXPORT_SYMBOL(wiphy_apply_custom_regulatory); 1648EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
1344 1649
1345static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd,
1346 const struct ieee80211_regdomain *src_regd)
1347{
1348 struct ieee80211_regdomain *regd;
1349 int size_of_regd = 0;
1350 unsigned int i;
1351
1352 size_of_regd = sizeof(struct ieee80211_regdomain) +
1353 ((src_regd->n_reg_rules + 1) * sizeof(struct ieee80211_reg_rule));
1354
1355 regd = kzalloc(size_of_regd, GFP_KERNEL);
1356 if (!regd)
1357 return -ENOMEM;
1358
1359 memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
1360
1361 for (i = 0; i < src_regd->n_reg_rules; i++)
1362 memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
1363 sizeof(struct ieee80211_reg_rule));
1364
1365 *dst_regd = regd;
1366 return 0;
1367}
1368
1369/* 1650/*
1370 * Return value which can be used by ignore_request() to indicate 1651 * Return value which can be used by ignore_request() to indicate
1371 * it has been determined we should intersect two regulatory domains 1652 * it has been determined we should intersect two regulatory domains
@@ -1387,7 +1668,7 @@ static int ignore_request(struct wiphy *wiphy,
1387 1668
1388 switch (pending_request->initiator) { 1669 switch (pending_request->initiator) {
1389 case NL80211_REGDOM_SET_BY_CORE: 1670 case NL80211_REGDOM_SET_BY_CORE:
1390 return -EINVAL; 1671 return 0;
1391 case NL80211_REGDOM_SET_BY_COUNTRY_IE: 1672 case NL80211_REGDOM_SET_BY_COUNTRY_IE:
1392 1673
1393 last_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx); 1674 last_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
@@ -1418,8 +1699,6 @@ static int ignore_request(struct wiphy *wiphy,
1418 return REG_INTERSECT; 1699 return REG_INTERSECT;
1419 case NL80211_REGDOM_SET_BY_DRIVER: 1700 case NL80211_REGDOM_SET_BY_DRIVER:
1420 if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) { 1701 if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) {
1421 if (is_old_static_regdom(cfg80211_regdomain))
1422 return 0;
1423 if (regdom_changes(pending_request->alpha2)) 1702 if (regdom_changes(pending_request->alpha2))
1424 return 0; 1703 return 0;
1425 return -EALREADY; 1704 return -EALREADY;
@@ -1456,8 +1735,7 @@ static int ignore_request(struct wiphy *wiphy,
1456 return -EAGAIN; 1735 return -EAGAIN;
1457 } 1736 }
1458 1737
1459 if (!is_old_static_regdom(cfg80211_regdomain) && 1738 if (!regdom_changes(pending_request->alpha2))
1460 !regdom_changes(pending_request->alpha2))
1461 return -EALREADY; 1739 return -EALREADY;
1462 1740
1463 return 0; 1741 return 0;
@@ -1529,6 +1807,11 @@ new_request:
1529 1807
1530 pending_request = NULL; 1808 pending_request = NULL;
1531 1809
1810 if (last_request->initiator == NL80211_REGDOM_SET_BY_USER) {
1811 user_alpha2[0] = last_request->alpha2[0];
1812 user_alpha2[1] = last_request->alpha2[1];
1813 }
1814
1532 /* When r == REG_INTERSECT we do need to call CRDA */ 1815 /* When r == REG_INTERSECT we do need to call CRDA */
1533 if (r < 0) { 1816 if (r < 0) {
1534 /* 1817 /*
@@ -1648,12 +1931,16 @@ static void queue_regulatory_request(struct regulatory_request *request)
1648 schedule_work(&reg_work); 1931 schedule_work(&reg_work);
1649} 1932}
1650 1933
1651/* Core regulatory hint -- happens once during cfg80211_init() */ 1934/*
1935 * Core regulatory hint -- happens during cfg80211_init()
1936 * and when we restore regulatory settings.
1937 */
1652static int regulatory_hint_core(const char *alpha2) 1938static int regulatory_hint_core(const char *alpha2)
1653{ 1939{
1654 struct regulatory_request *request; 1940 struct regulatory_request *request;
1655 1941
1656 BUG_ON(last_request); 1942 kfree(last_request);
1943 last_request = NULL;
1657 1944
1658 request = kzalloc(sizeof(struct regulatory_request), 1945 request = kzalloc(sizeof(struct regulatory_request),
1659 GFP_KERNEL); 1946 GFP_KERNEL);
@@ -1664,14 +1951,12 @@ static int regulatory_hint_core(const char *alpha2)
1664 request->alpha2[1] = alpha2[1]; 1951 request->alpha2[1] = alpha2[1];
1665 request->initiator = NL80211_REGDOM_SET_BY_CORE; 1952 request->initiator = NL80211_REGDOM_SET_BY_CORE;
1666 1953
1667 queue_regulatory_request(request);
1668
1669 /* 1954 /*
1670 * This ensures last_request is populated once modules 1955 * This ensures last_request is populated once modules
1671 * come swinging in and calling regulatory hints and 1956 * come swinging in and calling regulatory hints and
1672 * wiphy_apply_custom_regulatory(). 1957 * wiphy_apply_custom_regulatory().
1673 */ 1958 */
1674 flush_scheduled_work(); 1959 reg_process_hint(request);
1675 1960
1676 return 0; 1961 return 0;
1677} 1962}
@@ -1758,8 +2043,9 @@ static bool reg_same_country_ie_hint(struct wiphy *wiphy,
1758 * therefore cannot iterate over the rdev list here. 2043 * therefore cannot iterate over the rdev list here.
1759 */ 2044 */
1760void regulatory_hint_11d(struct wiphy *wiphy, 2045void regulatory_hint_11d(struct wiphy *wiphy,
1761 u8 *country_ie, 2046 enum ieee80211_band band,
1762 u8 country_ie_len) 2047 u8 *country_ie,
2048 u8 country_ie_len)
1763{ 2049{
1764 struct ieee80211_regdomain *rd = NULL; 2050 struct ieee80211_regdomain *rd = NULL;
1765 char alpha2[2]; 2051 char alpha2[2];
@@ -1805,9 +2091,11 @@ void regulatory_hint_11d(struct wiphy *wiphy,
1805 wiphy_idx_valid(last_request->wiphy_idx))) 2091 wiphy_idx_valid(last_request->wiphy_idx)))
1806 goto out; 2092 goto out;
1807 2093
1808 rd = country_ie_2_rd(country_ie, country_ie_len, &checksum); 2094 rd = country_ie_2_rd(band, country_ie, country_ie_len, &checksum);
1809 if (!rd) 2095 if (!rd) {
2096 REG_DBG_PRINT("cfg80211: Ignoring bogus country IE\n");
1810 goto out; 2097 goto out;
2098 }
1811 2099
1812 /* 2100 /*
1813 * This will not happen right now but we leave it here for the 2101 * This will not happen right now but we leave it here for the
@@ -1850,6 +2138,123 @@ out:
1850 mutex_unlock(&reg_mutex); 2138 mutex_unlock(&reg_mutex);
1851} 2139}
1852 2140
2141static void restore_alpha2(char *alpha2, bool reset_user)
2142{
2143 /* indicates there is no alpha2 to consider for restoration */
2144 alpha2[0] = '9';
2145 alpha2[1] = '7';
2146
2147 /* The user setting has precedence over the module parameter */
2148 if (is_user_regdom_saved()) {
2149 /* Unless we're asked to ignore it and reset it */
2150 if (reset_user) {
2151 REG_DBG_PRINT("cfg80211: Restoring regulatory settings "
2152 "including user preference\n");
2153 user_alpha2[0] = '9';
2154 user_alpha2[1] = '7';
2155
2156 /*
2157 * If we're ignoring user settings, we still need to
2158 * check the module parameter to ensure we put things
2159 * back as they were for a full restore.
2160 */
2161 if (!is_world_regdom(ieee80211_regdom)) {
2162 REG_DBG_PRINT("cfg80211: Keeping preference on "
2163 "module parameter ieee80211_regdom: %c%c\n",
2164 ieee80211_regdom[0],
2165 ieee80211_regdom[1]);
2166 alpha2[0] = ieee80211_regdom[0];
2167 alpha2[1] = ieee80211_regdom[1];
2168 }
2169 } else {
2170 REG_DBG_PRINT("cfg80211: Restoring regulatory settings "
2171 "while preserving user preference for: %c%c\n",
2172 user_alpha2[0],
2173 user_alpha2[1]);
2174 alpha2[0] = user_alpha2[0];
2175 alpha2[1] = user_alpha2[1];
2176 }
2177 } else if (!is_world_regdom(ieee80211_regdom)) {
2178 REG_DBG_PRINT("cfg80211: Keeping preference on "
2179 "module parameter ieee80211_regdom: %c%c\n",
2180 ieee80211_regdom[0],
2181 ieee80211_regdom[1]);
2182 alpha2[0] = ieee80211_regdom[0];
2183 alpha2[1] = ieee80211_regdom[1];
2184 } else
2185 REG_DBG_PRINT("cfg80211: Restoring regulatory settings\n");
2186}
2187
2188/*
2189 * Restoring regulatory settings involves ingoring any
2190 * possibly stale country IE information and user regulatory
2191 * settings if so desired, this includes any beacon hints
2192 * learned as we could have traveled outside to another country
2193 * after disconnection. To restore regulatory settings we do
2194 * exactly what we did at bootup:
2195 *
2196 * - send a core regulatory hint
2197 * - send a user regulatory hint if applicable
2198 *
2199 * Device drivers that send a regulatory hint for a specific country
2200 * keep their own regulatory domain on wiphy->regd so that does does
2201 * not need to be remembered.
2202 */
2203static void restore_regulatory_settings(bool reset_user)
2204{
2205 char alpha2[2];
2206 struct reg_beacon *reg_beacon, *btmp;
2207
2208 mutex_lock(&cfg80211_mutex);
2209 mutex_lock(&reg_mutex);
2210
2211 reset_regdomains();
2212 restore_alpha2(alpha2, reset_user);
2213
2214 /* Clear beacon hints */
2215 spin_lock_bh(&reg_pending_beacons_lock);
2216 if (!list_empty(&reg_pending_beacons)) {
2217 list_for_each_entry_safe(reg_beacon, btmp,
2218 &reg_pending_beacons, list) {
2219 list_del(&reg_beacon->list);
2220 kfree(reg_beacon);
2221 }
2222 }
2223 spin_unlock_bh(&reg_pending_beacons_lock);
2224
2225 if (!list_empty(&reg_beacon_list)) {
2226 list_for_each_entry_safe(reg_beacon, btmp,
2227 &reg_beacon_list, list) {
2228 list_del(&reg_beacon->list);
2229 kfree(reg_beacon);
2230 }
2231 }
2232
2233 /* First restore to the basic regulatory settings */
2234 cfg80211_regdomain = cfg80211_world_regdom;
2235
2236 mutex_unlock(&reg_mutex);
2237 mutex_unlock(&cfg80211_mutex);
2238
2239 regulatory_hint_core(cfg80211_regdomain->alpha2);
2240
2241 /*
2242 * This restores the ieee80211_regdom module parameter
2243 * preference or the last user requested regulatory
2244 * settings, user regulatory settings takes precedence.
2245 */
2246 if (is_an_alpha2(alpha2))
2247 regulatory_hint_user(user_alpha2);
2248}
2249
2250
2251void regulatory_hint_disconnect(void)
2252{
2253 REG_DBG_PRINT("cfg80211: All devices are disconnected, going to "
2254 "restore regulatory settings\n");
2255 restore_regulatory_settings(false);
2256}
2257
1853static bool freq_is_chan_12_13_14(u16 freq) 2258static bool freq_is_chan_12_13_14(u16 freq)
1854{ 2259{
1855 if (freq == ieee80211_channel_to_frequency(12) || 2260 if (freq == ieee80211_channel_to_frequency(12) ||
@@ -1875,13 +2280,12 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
1875 if (!reg_beacon) 2280 if (!reg_beacon)
1876 return -ENOMEM; 2281 return -ENOMEM;
1877 2282
1878#ifdef CONFIG_CFG80211_REG_DEBUG 2283 REG_DBG_PRINT("cfg80211: Found new beacon on "
1879 printk(KERN_DEBUG "cfg80211: Found new beacon on " 2284 "frequency: %d MHz (Ch %d) on %s\n",
1880 "frequency: %d MHz (Ch %d) on %s\n", 2285 beacon_chan->center_freq,
1881 beacon_chan->center_freq, 2286 ieee80211_frequency_to_channel(beacon_chan->center_freq),
1882 ieee80211_frequency_to_channel(beacon_chan->center_freq), 2287 wiphy_name(wiphy));
1883 wiphy_name(wiphy)); 2288
1884#endif
1885 memcpy(&reg_beacon->chan, beacon_chan, 2289 memcpy(&reg_beacon->chan, beacon_chan,
1886 sizeof(struct ieee80211_channel)); 2290 sizeof(struct ieee80211_channel));
1887 2291
@@ -2039,8 +2443,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2039 * If someone else asked us to change the rd lets only bother 2443 * If someone else asked us to change the rd lets only bother
2040 * checking if the alpha2 changes if CRDA was already called 2444 * checking if the alpha2 changes if CRDA was already called
2041 */ 2445 */
2042 if (!is_old_static_regdom(cfg80211_regdomain) && 2446 if (!regdom_changes(rd->alpha2))
2043 !regdom_changes(rd->alpha2))
2044 return -EINVAL; 2447 return -EINVAL;
2045 } 2448 }
2046 2449
@@ -2239,15 +2642,11 @@ int regulatory_init(void)
2239 spin_lock_init(&reg_requests_lock); 2642 spin_lock_init(&reg_requests_lock);
2240 spin_lock_init(&reg_pending_beacons_lock); 2643 spin_lock_init(&reg_pending_beacons_lock);
2241 2644
2242#ifdef CONFIG_WIRELESS_OLD_REGULATORY
2243 cfg80211_regdomain = static_regdom(ieee80211_regdom);
2244
2245 printk(KERN_INFO "cfg80211: Using static regulatory domain info\n");
2246 print_regdomain_info(cfg80211_regdomain);
2247#else
2248 cfg80211_regdomain = cfg80211_world_regdom; 2645 cfg80211_regdomain = cfg80211_world_regdom;
2249 2646
2250#endif 2647 user_alpha2[0] = '9';
2648 user_alpha2[1] = '7';
2649
2251 /* We always try to get an update for the static regdomain */ 2650 /* We always try to get an update for the static regdomain */
2252 err = regulatory_hint_core(cfg80211_regdomain->alpha2); 2651 err = regulatory_hint_core(cfg80211_regdomain->alpha2);
2253 if (err) { 2652 if (err) {
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index 3362c7c069b..b26224a9f3b 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -41,15 +41,44 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
41 * regulatory_hint_11d - hints a country IE as a regulatory domain 41 * regulatory_hint_11d - hints a country IE as a regulatory domain
42 * @wiphy: the wireless device giving the hint (used only for reporting 42 * @wiphy: the wireless device giving the hint (used only for reporting
43 * conflicts) 43 * conflicts)
44 * @band: the band on which the country IE was received on. This determines
45 * the band we'll process the country IE channel triplets for.
44 * @country_ie: pointer to the country IE 46 * @country_ie: pointer to the country IE
45 * @country_ie_len: length of the country IE 47 * @country_ie_len: length of the country IE
46 * 48 *
47 * We will intersect the rd with the what CRDA tells us should apply 49 * We will intersect the rd with the what CRDA tells us should apply
48 * for the alpha2 this country IE belongs to, this prevents APs from 50 * for the alpha2 this country IE belongs to, this prevents APs from
49 * sending us incorrect or outdated information against a country. 51 * sending us incorrect or outdated information against a country.
52 *
53 * The AP is expected to provide Country IE channel triplets for the
54 * band it is on. It is technically possible for APs to send channel
55 * country IE triplets even for channels outside of the band they are
56 * in but for that they would have to use the regulatory extension
57 * in combination with a triplet but this behaviour is currently
58 * not observed. For this reason if a triplet is seen with channel
59 * information for a band the BSS is not present in it will be ignored.
50 */ 60 */
51void regulatory_hint_11d(struct wiphy *wiphy, 61void regulatory_hint_11d(struct wiphy *wiphy,
62 enum ieee80211_band band,
52 u8 *country_ie, 63 u8 *country_ie,
53 u8 country_ie_len); 64 u8 country_ie_len);
54 65
66/**
67 * regulatory_hint_disconnect - informs all devices have been disconneted
68 *
69 * Regulotory rules can be enhanced further upon scanning and upon
70 * connection to an AP. These rules become stale if we disconnect
71 * and go to another country, whether or not we suspend and resume.
72 * If we suspend, go to another country and resume we'll automatically
73 * get disconnected shortly after resuming and things will be reset as well.
74 * This routine is a helper to restore regulatory settings to how they were
75 * prior to our first connect attempt. This includes ignoring country IE and
76 * beacon regulatory hints. The ieee80211_regdom module parameter will always
77 * be respected but if a user had set the regulatory domain that will take
78 * precedence.
79 *
80 * Must be called from process context.
81 */
82void regulatory_hint_disconnect(void);
83
55#endif /* __NET_WIRELESS_REG_H */ 84#endif /* __NET_WIRELESS_REG_H */
diff --git a/net/wireless/regdb.h b/net/wireless/regdb.h
new file mode 100644
index 00000000000..818222c9251
--- /dev/null
+++ b/net/wireless/regdb.h
@@ -0,0 +1,7 @@
1#ifndef __REGDB_H__
2#define __REGDB_H__
3
4extern const struct ieee80211_regdomain *reg_regdb[];
5extern int reg_regdb_size;
6
7#endif /* __REGDB_H__ */
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 0c2cbbebca9..978cac3414b 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -100,8 +100,10 @@ static void bss_release(struct kref *ref)
100 if (bss->pub.free_priv) 100 if (bss->pub.free_priv)
101 bss->pub.free_priv(&bss->pub); 101 bss->pub.free_priv(&bss->pub);
102 102
103 if (bss->ies_allocated) 103 if (bss->beacon_ies_allocated)
104 kfree(bss->pub.information_elements); 104 kfree(bss->pub.beacon_ies);
105 if (bss->proberesp_ies_allocated)
106 kfree(bss->pub.proberesp_ies);
105 107
106 BUG_ON(atomic_read(&bss->hold)); 108 BUG_ON(atomic_read(&bss->hold));
107 109
@@ -141,9 +143,9 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *dev)
141 dev->bss_generation++; 143 dev->bss_generation++;
142} 144}
143 145
144static u8 *find_ie(u8 num, u8 *ies, int len) 146const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
145{ 147{
146 while (len > 2 && ies[0] != num) { 148 while (len > 2 && ies[0] != eid) {
147 len -= ies[1] + 2; 149 len -= ies[1] + 2;
148 ies += ies[1] + 2; 150 ies += ies[1] + 2;
149 } 151 }
@@ -153,11 +155,12 @@ static u8 *find_ie(u8 num, u8 *ies, int len)
153 return NULL; 155 return NULL;
154 return ies; 156 return ies;
155} 157}
158EXPORT_SYMBOL(cfg80211_find_ie);
156 159
157static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2) 160static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2)
158{ 161{
159 const u8 *ie1 = find_ie(num, ies1, len1); 162 const u8 *ie1 = cfg80211_find_ie(num, ies1, len1);
160 const u8 *ie2 = find_ie(num, ies2, len2); 163 const u8 *ie2 = cfg80211_find_ie(num, ies2, len2);
161 int r; 164 int r;
162 165
163 if (!ie1 && !ie2) 166 if (!ie1 && !ie2)
@@ -183,9 +186,9 @@ static bool is_bss(struct cfg80211_bss *a,
183 if (!ssid) 186 if (!ssid)
184 return true; 187 return true;
185 188
186 ssidie = find_ie(WLAN_EID_SSID, 189 ssidie = cfg80211_find_ie(WLAN_EID_SSID,
187 a->information_elements, 190 a->information_elements,
188 a->len_information_elements); 191 a->len_information_elements);
189 if (!ssidie) 192 if (!ssidie)
190 return false; 193 return false;
191 if (ssidie[1] != ssid_len) 194 if (ssidie[1] != ssid_len)
@@ -202,9 +205,9 @@ static bool is_mesh(struct cfg80211_bss *a,
202 if (!is_zero_ether_addr(a->bssid)) 205 if (!is_zero_ether_addr(a->bssid))
203 return false; 206 return false;
204 207
205 ie = find_ie(WLAN_EID_MESH_ID, 208 ie = cfg80211_find_ie(WLAN_EID_MESH_ID,
206 a->information_elements, 209 a->information_elements,
207 a->len_information_elements); 210 a->len_information_elements);
208 if (!ie) 211 if (!ie)
209 return false; 212 return false;
210 if (ie[1] != meshidlen) 213 if (ie[1] != meshidlen)
@@ -212,9 +215,9 @@ static bool is_mesh(struct cfg80211_bss *a,
212 if (memcmp(ie + 2, meshid, meshidlen)) 215 if (memcmp(ie + 2, meshid, meshidlen))
213 return false; 216 return false;
214 217
215 ie = find_ie(WLAN_EID_MESH_CONFIG, 218 ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
216 a->information_elements, 219 a->information_elements,
217 a->len_information_elements); 220 a->len_information_elements);
218 if (!ie) 221 if (!ie)
219 return false; 222 return false;
220 if (ie[1] != sizeof(struct ieee80211_meshconf_ie)) 223 if (ie[1] != sizeof(struct ieee80211_meshconf_ie))
@@ -375,8 +378,7 @@ rb_find_bss(struct cfg80211_registered_device *dev,
375 378
376static struct cfg80211_internal_bss * 379static struct cfg80211_internal_bss *
377cfg80211_bss_update(struct cfg80211_registered_device *dev, 380cfg80211_bss_update(struct cfg80211_registered_device *dev,
378 struct cfg80211_internal_bss *res, 381 struct cfg80211_internal_bss *res)
379 bool overwrite)
380{ 382{
381 struct cfg80211_internal_bss *found = NULL; 383 struct cfg80211_internal_bss *found = NULL;
382 const u8 *meshid, *meshcfg; 384 const u8 *meshid, *meshcfg;
@@ -394,11 +396,12 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
394 396
395 if (is_zero_ether_addr(res->pub.bssid)) { 397 if (is_zero_ether_addr(res->pub.bssid)) {
396 /* must be mesh, verify */ 398 /* must be mesh, verify */
397 meshid = find_ie(WLAN_EID_MESH_ID, res->pub.information_elements, 399 meshid = cfg80211_find_ie(WLAN_EID_MESH_ID,
398 res->pub.len_information_elements); 400 res->pub.information_elements,
399 meshcfg = find_ie(WLAN_EID_MESH_CONFIG, 401 res->pub.len_information_elements);
400 res->pub.information_elements, 402 meshcfg = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
401 res->pub.len_information_elements); 403 res->pub.information_elements,
404 res->pub.len_information_elements);
402 if (!meshid || !meshcfg || 405 if (!meshid || !meshcfg ||
403 meshcfg[1] != sizeof(struct ieee80211_meshconf_ie)) { 406 meshcfg[1] != sizeof(struct ieee80211_meshconf_ie)) {
404 /* bogus mesh */ 407 /* bogus mesh */
@@ -418,28 +421,64 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
418 found->pub.capability = res->pub.capability; 421 found->pub.capability = res->pub.capability;
419 found->ts = res->ts; 422 found->ts = res->ts;
420 423
421 /* overwrite IEs */ 424 /* Update IEs */
422 if (overwrite) { 425 if (res->pub.proberesp_ies) {
423 size_t used = dev->wiphy.bss_priv_size + sizeof(*res); 426 size_t used = dev->wiphy.bss_priv_size + sizeof(*res);
424 size_t ielen = res->pub.len_information_elements; 427 size_t ielen = res->pub.len_proberesp_ies;
428
429 if (found->pub.proberesp_ies &&
430 !found->proberesp_ies_allocated &&
431 ksize(found) >= used + ielen) {
432 memcpy(found->pub.proberesp_ies,
433 res->pub.proberesp_ies, ielen);
434 found->pub.len_proberesp_ies = ielen;
435 } else {
436 u8 *ies = found->pub.proberesp_ies;
437
438 if (found->proberesp_ies_allocated)
439 ies = krealloc(ies, ielen, GFP_ATOMIC);
440 else
441 ies = kmalloc(ielen, GFP_ATOMIC);
442
443 if (ies) {
444 memcpy(ies, res->pub.proberesp_ies,
445 ielen);
446 found->proberesp_ies_allocated = true;
447 found->pub.proberesp_ies = ies;
448 found->pub.len_proberesp_ies = ielen;
449 }
450 }
425 451
426 if (!found->ies_allocated && ksize(found) >= used + ielen) { 452 /* Override possible earlier Beacon frame IEs */
427 memcpy(found->pub.information_elements, 453 found->pub.information_elements =
428 res->pub.information_elements, ielen); 454 found->pub.proberesp_ies;
429 found->pub.len_information_elements = ielen; 455 found->pub.len_information_elements =
456 found->pub.len_proberesp_ies;
457 }
458 if (res->pub.beacon_ies) {
459 size_t used = dev->wiphy.bss_priv_size + sizeof(*res);
460 size_t ielen = res->pub.len_beacon_ies;
461
462 if (found->pub.beacon_ies &&
463 !found->beacon_ies_allocated &&
464 ksize(found) >= used + ielen) {
465 memcpy(found->pub.beacon_ies,
466 res->pub.beacon_ies, ielen);
467 found->pub.len_beacon_ies = ielen;
430 } else { 468 } else {
431 u8 *ies = found->pub.information_elements; 469 u8 *ies = found->pub.beacon_ies;
432 470
433 if (found->ies_allocated) 471 if (found->beacon_ies_allocated)
434 ies = krealloc(ies, ielen, GFP_ATOMIC); 472 ies = krealloc(ies, ielen, GFP_ATOMIC);
435 else 473 else
436 ies = kmalloc(ielen, GFP_ATOMIC); 474 ies = kmalloc(ielen, GFP_ATOMIC);
437 475
438 if (ies) { 476 if (ies) {
439 memcpy(ies, res->pub.information_elements, ielen); 477 memcpy(ies, res->pub.beacon_ies,
440 found->ies_allocated = true; 478 ielen);
441 found->pub.information_elements = ies; 479 found->beacon_ies_allocated = true;
442 found->pub.len_information_elements = ielen; 480 found->pub.beacon_ies = ies;
481 found->pub.len_beacon_ies = ielen;
443 } 482 }
444 } 483 }
445 } 484 }
@@ -489,14 +528,26 @@ cfg80211_inform_bss(struct wiphy *wiphy,
489 res->pub.tsf = timestamp; 528 res->pub.tsf = timestamp;
490 res->pub.beacon_interval = beacon_interval; 529 res->pub.beacon_interval = beacon_interval;
491 res->pub.capability = capability; 530 res->pub.capability = capability;
492 /* point to after the private area */ 531 /*
493 res->pub.information_elements = (u8 *)res + sizeof(*res) + privsz; 532 * Since we do not know here whether the IEs are from a Beacon or Probe
494 memcpy(res->pub.information_elements, ie, ielen); 533 * Response frame, we need to pick one of the options and only use it
495 res->pub.len_information_elements = ielen; 534 * with the driver that does not provide the full Beacon/Probe Response
535 * frame. Use Beacon frame pointer to avoid indicating that this should
536 * override the information_elements pointer should we have received an
537 * earlier indication of Probe Response data.
538 *
539 * The initial buffer for the IEs is allocated with the BSS entry and
540 * is located after the private area.
541 */
542 res->pub.beacon_ies = (u8 *)res + sizeof(*res) + privsz;
543 memcpy(res->pub.beacon_ies, ie, ielen);
544 res->pub.len_beacon_ies = ielen;
545 res->pub.information_elements = res->pub.beacon_ies;
546 res->pub.len_information_elements = res->pub.len_beacon_ies;
496 547
497 kref_init(&res->ref); 548 kref_init(&res->ref);
498 549
499 res = cfg80211_bss_update(wiphy_to_dev(wiphy), res, 0); 550 res = cfg80211_bss_update(wiphy_to_dev(wiphy), res);
500 if (!res) 551 if (!res)
501 return NULL; 552 return NULL;
502 553
@@ -517,7 +568,6 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
517 struct cfg80211_internal_bss *res; 568 struct cfg80211_internal_bss *res;
518 size_t ielen = len - offsetof(struct ieee80211_mgmt, 569 size_t ielen = len - offsetof(struct ieee80211_mgmt,
519 u.probe_resp.variable); 570 u.probe_resp.variable);
520 bool overwrite;
521 size_t privsz = wiphy->bss_priv_size; 571 size_t privsz = wiphy->bss_priv_size;
522 572
523 if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC && 573 if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC &&
@@ -538,16 +588,28 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
538 res->pub.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp); 588 res->pub.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
539 res->pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int); 589 res->pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
540 res->pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info); 590 res->pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
541 /* point to after the private area */ 591 /*
542 res->pub.information_elements = (u8 *)res + sizeof(*res) + privsz; 592 * The initial buffer for the IEs is allocated with the BSS entry and
543 memcpy(res->pub.information_elements, mgmt->u.probe_resp.variable, ielen); 593 * is located after the private area.
544 res->pub.len_information_elements = ielen; 594 */
595 if (ieee80211_is_probe_resp(mgmt->frame_control)) {
596 res->pub.proberesp_ies = (u8 *) res + sizeof(*res) + privsz;
597 memcpy(res->pub.proberesp_ies, mgmt->u.probe_resp.variable,
598 ielen);
599 res->pub.len_proberesp_ies = ielen;
600 res->pub.information_elements = res->pub.proberesp_ies;
601 res->pub.len_information_elements = res->pub.len_proberesp_ies;
602 } else {
603 res->pub.beacon_ies = (u8 *) res + sizeof(*res) + privsz;
604 memcpy(res->pub.beacon_ies, mgmt->u.beacon.variable, ielen);
605 res->pub.len_beacon_ies = ielen;
606 res->pub.information_elements = res->pub.beacon_ies;
607 res->pub.len_information_elements = res->pub.len_beacon_ies;
608 }
545 609
546 kref_init(&res->ref); 610 kref_init(&res->ref);
547 611
548 overwrite = ieee80211_is_probe_resp(mgmt->frame_control); 612 res = cfg80211_bss_update(wiphy_to_dev(wiphy), res);
549
550 res = cfg80211_bss_update(wiphy_to_dev(wiphy), res, overwrite);
551 if (!res) 613 if (!res)
552 return NULL; 614 return NULL;
553 615
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index dc0fc4989d5..17fde0da1b0 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -34,6 +34,44 @@ struct cfg80211_conn {
34 bool auto_auth, prev_bssid_valid; 34 bool auto_auth, prev_bssid_valid;
35}; 35};
36 36
37bool cfg80211_is_all_idle(void)
38{
39 struct cfg80211_registered_device *rdev;
40 struct wireless_dev *wdev;
41 bool is_all_idle = true;
42
43 mutex_lock(&cfg80211_mutex);
44
45 /*
46 * All devices must be idle as otherwise if you are actively
47 * scanning some new beacon hints could be learned and would
48 * count as new regulatory hints.
49 */
50 list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
51 cfg80211_lock_rdev(rdev);
52 list_for_each_entry(wdev, &rdev->netdev_list, list) {
53 wdev_lock(wdev);
54 if (wdev->sme_state != CFG80211_SME_IDLE)
55 is_all_idle = false;
56 wdev_unlock(wdev);
57 }
58 cfg80211_unlock_rdev(rdev);
59 }
60
61 mutex_unlock(&cfg80211_mutex);
62
63 return is_all_idle;
64}
65
66static void disconnect_work(struct work_struct *work)
67{
68 if (!cfg80211_is_all_idle())
69 return;
70
71 regulatory_hint_disconnect();
72}
73
74static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
37 75
38static int cfg80211_conn_scan(struct wireless_dev *wdev) 76static int cfg80211_conn_scan(struct wireless_dev *wdev)
39{ 77{
@@ -454,6 +492,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
454 * - and country_ie[1] which is the IE length 492 * - and country_ie[1] which is the IE length
455 */ 493 */
456 regulatory_hint_11d(wdev->wiphy, 494 regulatory_hint_11d(wdev->wiphy,
495 bss->channel->band,
457 country_ie + 2, 496 country_ie + 2,
458 country_ie[1]); 497 country_ie[1]);
459} 498}
@@ -657,6 +696,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
657 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); 696 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
658 wdev->wext.connect.ssid_len = 0; 697 wdev->wext.connect.ssid_len = 0;
659#endif 698#endif
699
700 schedule_work(&cfg80211_disconnect_work);
660} 701}
661 702
662void cfg80211_disconnected(struct net_device *dev, u16 reason, 703void cfg80211_disconnected(struct net_device *dev, u16 reason,
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index efe3c5c92b2..9f2cef3e0ca 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -33,10 +33,30 @@ static ssize_t name ## _show(struct device *dev, \
33 33
34SHOW_FMT(index, "%d", wiphy_idx); 34SHOW_FMT(index, "%d", wiphy_idx);
35SHOW_FMT(macaddress, "%pM", wiphy.perm_addr); 35SHOW_FMT(macaddress, "%pM", wiphy.perm_addr);
36SHOW_FMT(address_mask, "%pM", wiphy.addr_mask);
37
38static ssize_t addresses_show(struct device *dev,
39 struct device_attribute *attr,
40 char *buf)
41{
42 struct wiphy *wiphy = &dev_to_rdev(dev)->wiphy;
43 char *start = buf;
44 int i;
45
46 if (!wiphy->addresses)
47 return sprintf(buf, "%pM\n", wiphy->perm_addr);
48
49 for (i = 0; i < wiphy->n_addresses; i++)
50 buf += sprintf(buf, "%pM\n", &wiphy->addresses[i].addr);
51
52 return buf - start;
53}
36 54
37static struct device_attribute ieee80211_dev_attrs[] = { 55static struct device_attribute ieee80211_dev_attrs[] = {
38 __ATTR_RO(index), 56 __ATTR_RO(index),
39 __ATTR_RO(macaddress), 57 __ATTR_RO(macaddress),
58 __ATTR_RO(address_mask),
59 __ATTR_RO(addresses),
40 {} 60 {}
41}; 61};
42 62
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 59361fdcb5d..be2ab8c59e3 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -227,8 +227,11 @@ unsigned int ieee80211_hdrlen(__le16 fc)
227 if (ieee80211_is_data(fc)) { 227 if (ieee80211_is_data(fc)) {
228 if (ieee80211_has_a4(fc)) 228 if (ieee80211_has_a4(fc))
229 hdrlen = 30; 229 hdrlen = 30;
230 if (ieee80211_is_data_qos(fc)) 230 if (ieee80211_is_data_qos(fc)) {
231 hdrlen += IEEE80211_QOS_CTL_LEN; 231 hdrlen += IEEE80211_QOS_CTL_LEN;
232 if (ieee80211_has_order(fc))
233 hdrlen += IEEE80211_HT_CTL_LEN;
234 }
232 goto out; 235 goto out;
233 } 236 }
234 237
@@ -285,7 +288,7 @@ static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
285 } 288 }
286} 289}
287 290
288int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr, 291int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
289 enum nl80211_iftype iftype) 292 enum nl80211_iftype iftype)
290{ 293{
291 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 294 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -383,7 +386,7 @@ int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr,
383} 386}
384EXPORT_SYMBOL(ieee80211_data_to_8023); 387EXPORT_SYMBOL(ieee80211_data_to_8023);
385 388
386int ieee80211_data_from_8023(struct sk_buff *skb, u8 *addr, 389int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
387 enum nl80211_iftype iftype, u8 *bssid, bool qos) 390 enum nl80211_iftype iftype, u8 *bssid, bool qos)
388{ 391{
389 struct ieee80211_hdr hdr; 392 struct ieee80211_hdr hdr;
@@ -497,6 +500,101 @@ int ieee80211_data_from_8023(struct sk_buff *skb, u8 *addr,
497} 500}
498EXPORT_SYMBOL(ieee80211_data_from_8023); 501EXPORT_SYMBOL(ieee80211_data_from_8023);
499 502
503
504void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
505 const u8 *addr, enum nl80211_iftype iftype,
506 const unsigned int extra_headroom)
507{
508 struct sk_buff *frame = NULL;
509 u16 ethertype;
510 u8 *payload;
511 const struct ethhdr *eth;
512 int remaining, err;
513 u8 dst[ETH_ALEN], src[ETH_ALEN];
514
515 err = ieee80211_data_to_8023(skb, addr, iftype);
516 if (err)
517 goto out;
518
519 /* skip the wrapping header */
520 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
521 if (!eth)
522 goto out;
523
524 while (skb != frame) {
525 u8 padding;
526 __be16 len = eth->h_proto;
527 unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
528
529 remaining = skb->len;
530 memcpy(dst, eth->h_dest, ETH_ALEN);
531 memcpy(src, eth->h_source, ETH_ALEN);
532
533 padding = (4 - subframe_len) & 0x3;
534 /* the last MSDU has no padding */
535 if (subframe_len > remaining)
536 goto purge;
537
538 skb_pull(skb, sizeof(struct ethhdr));
539 /* reuse skb for the last subframe */
540 if (remaining <= subframe_len + padding)
541 frame = skb;
542 else {
543 unsigned int hlen = ALIGN(extra_headroom, 4);
544 /*
545 * Allocate and reserve two bytes more for payload
546 * alignment since sizeof(struct ethhdr) is 14.
547 */
548 frame = dev_alloc_skb(hlen + subframe_len + 2);
549 if (!frame)
550 goto purge;
551
552 skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
553 memcpy(skb_put(frame, ntohs(len)), skb->data,
554 ntohs(len));
555
556 eth = (struct ethhdr *)skb_pull(skb, ntohs(len) +
557 padding);
558 if (!eth) {
559 dev_kfree_skb(frame);
560 goto purge;
561 }
562 }
563
564 skb_reset_network_header(frame);
565 frame->dev = skb->dev;
566 frame->priority = skb->priority;
567
568 payload = frame->data;
569 ethertype = (payload[6] << 8) | payload[7];
570
571 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
572 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
573 compare_ether_addr(payload,
574 bridge_tunnel_header) == 0)) {
575 /* remove RFC1042 or Bridge-Tunnel
576 * encapsulation and replace EtherType */
577 skb_pull(frame, 6);
578 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
579 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
580 } else {
581 memcpy(skb_push(frame, sizeof(__be16)), &len,
582 sizeof(__be16));
583 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
584 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
585 }
586 __skb_queue_tail(list, frame);
587 }
588
589 return;
590
591 purge:
592 __skb_queue_purge(list);
593 out:
594 dev_kfree_skb(skb);
595}
596EXPORT_SYMBOL(ieee80211_amsdu_to_8023s);
597
500/* Given a data frame determine the 802.1p/1d tag to use. */ 598/* Given a data frame determine the 802.1p/1d tag to use. */
501unsigned int cfg80211_classify8021d(struct sk_buff *skb) 599unsigned int cfg80211_classify8021d(struct sk_buff *skb)
502{ 600{
@@ -720,3 +818,36 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
720 818
721 return err; 819 return err;
722} 820}
821
822u16 cfg80211_calculate_bitrate(struct rate_info *rate)
823{
824 int modulation, streams, bitrate;
825
826 if (!(rate->flags & RATE_INFO_FLAGS_MCS))
827 return rate->legacy;
828
829 /* the formula below does only work for MCS values smaller than 32 */
830 if (rate->mcs >= 32)
831 return 0;
832
833 modulation = rate->mcs & 7;
834 streams = (rate->mcs >> 3) + 1;
835
836 bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ?
837 13500000 : 6500000;
838
839 if (modulation < 4)
840 bitrate *= (modulation + 1);
841 else if (modulation == 4)
842 bitrate *= (modulation + 2);
843 else
844 bitrate *= (modulation + 3);
845
846 bitrate *= streams;
847
848 if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
849 bitrate = (bitrate / 9) * 10;
850
851 /* do NOT round down here */
852 return (bitrate + 50000) / 100000;
853}
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 54face3d442..b17eeae448d 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -1204,21 +1204,47 @@ int cfg80211_wext_siwrate(struct net_device *dev,
1204 struct wireless_dev *wdev = dev->ieee80211_ptr; 1204 struct wireless_dev *wdev = dev->ieee80211_ptr;
1205 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 1205 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
1206 struct cfg80211_bitrate_mask mask; 1206 struct cfg80211_bitrate_mask mask;
1207 u32 fixed, maxrate;
1208 struct ieee80211_supported_band *sband;
1209 int band, ridx;
1210 bool match = false;
1207 1211
1208 if (!rdev->ops->set_bitrate_mask) 1212 if (!rdev->ops->set_bitrate_mask)
1209 return -EOPNOTSUPP; 1213 return -EOPNOTSUPP;
1210 1214
1211 mask.fixed = 0; 1215 memset(&mask, 0, sizeof(mask));
1212 mask.maxrate = 0; 1216 fixed = 0;
1217 maxrate = (u32)-1;
1213 1218
1214 if (rate->value < 0) { 1219 if (rate->value < 0) {
1215 /* nothing */ 1220 /* nothing */
1216 } else if (rate->fixed) { 1221 } else if (rate->fixed) {
1217 mask.fixed = rate->value / 1000; /* kbps */ 1222 fixed = rate->value / 100000;
1218 } else { 1223 } else {
1219 mask.maxrate = rate->value / 1000; /* kbps */ 1224 maxrate = rate->value / 100000;
1220 } 1225 }
1221 1226
1227 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1228 sband = wdev->wiphy->bands[band];
1229 if (sband == NULL)
1230 continue;
1231 for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
1232 struct ieee80211_rate *srate = &sband->bitrates[ridx];
1233 if (fixed == srate->bitrate) {
1234 mask.control[band].legacy = 1 << ridx;
1235 match = true;
1236 break;
1237 }
1238 if (srate->bitrate <= maxrate) {
1239 mask.control[band].legacy |= 1 << ridx;
1240 match = true;
1241 }
1242 }
1243 }
1244
1245 if (!match)
1246 return -EINVAL;
1247
1222 return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask); 1248 return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask);
1223} 1249}
1224EXPORT_SYMBOL_GPL(cfg80211_wext_siwrate); 1250EXPORT_SYMBOL_GPL(cfg80211_wext_siwrate);
@@ -1257,10 +1283,7 @@ int cfg80211_wext_giwrate(struct net_device *dev,
1257 if (!(sinfo.filled & STATION_INFO_TX_BITRATE)) 1283 if (!(sinfo.filled & STATION_INFO_TX_BITRATE))
1258 return -EOPNOTSUPP; 1284 return -EOPNOTSUPP;
1259 1285
1260 rate->value = 0; 1286 rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate);
1261
1262 if (!(sinfo.txrate.flags & RATE_INFO_FLAGS_MCS))
1263 rate->value = 100000 * sinfo.txrate.legacy;
1264 1287
1265 return 0; 1288 return 0;
1266} 1289}
diff --git a/net/wireless/wext-proc.c b/net/wireless/wext-proc.c
index 273a7f77c83..8bafa31fa9f 100644
--- a/net/wireless/wext-proc.c
+++ b/net/wireless/wext-proc.c
@@ -140,7 +140,7 @@ static const struct file_operations wireless_seq_fops = {
140 .release = seq_release_net, 140 .release = seq_release_net,
141}; 141};
142 142
143int wext_proc_init(struct net *net) 143int __net_init wext_proc_init(struct net *net)
144{ 144{
145 /* Create /proc/net/wireless entry */ 145 /* Create /proc/net/wireless entry */
146 if (!proc_net_fops_create(net, "wireless", S_IRUGO, &wireless_seq_fops)) 146 if (!proc_net_fops_create(net, "wireless", S_IRUGO, &wireless_seq_fops))
@@ -149,7 +149,7 @@ int wext_proc_init(struct net *net)
149 return 0; 149 return 0;
150} 150}
151 151
152void wext_proc_exit(struct net *net) 152void __net_exit wext_proc_exit(struct net *net)
153{ 153{
154 proc_net_remove(net, "wireless"); 154 proc_net_remove(net, "wireless");
155} 155}
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index e3219e4cd04..9796f3ed1ed 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -55,6 +55,7 @@
55#include <linux/notifier.h> 55#include <linux/notifier.h>
56#include <linux/init.h> 56#include <linux/init.h>
57#include <linux/compat.h> 57#include <linux/compat.h>
58#include <linux/ctype.h>
58 59
59#include <net/x25.h> 60#include <net/x25.h>
60#include <net/compat.h> 61#include <net/compat.h>
@@ -512,15 +513,20 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
512{ 513{
513 struct sock *sk; 514 struct sock *sk;
514 struct x25_sock *x25; 515 struct x25_sock *x25;
515 int rc = -ESOCKTNOSUPPORT; 516 int rc = -EAFNOSUPPORT;
516 517
517 if (!net_eq(net, &init_net)) 518 if (!net_eq(net, &init_net))
518 return -EAFNOSUPPORT; 519 goto out;
519 520
520 if (sock->type != SOCK_SEQPACKET || protocol) 521 rc = -ESOCKTNOSUPPORT;
522 if (sock->type != SOCK_SEQPACKET)
521 goto out; 523 goto out;
522 524
523 rc = -ENOMEM; 525 rc = -EINVAL;
526 if (protocol)
527 goto out;
528
529 rc = -ENOBUFS;
524 if ((sk = x25_alloc_socket(net)) == NULL) 530 if ((sk = x25_alloc_socket(net)) == NULL)
525 goto out; 531 goto out;
526 532
@@ -643,7 +649,7 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
643{ 649{
644 struct sock *sk = sock->sk; 650 struct sock *sk = sock->sk;
645 struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; 651 struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
646 int rc = 0; 652 int len, i, rc = 0;
647 653
648 lock_kernel(); 654 lock_kernel();
649 if (!sock_flag(sk, SOCK_ZAPPED) || 655 if (!sock_flag(sk, SOCK_ZAPPED) ||
@@ -653,6 +659,14 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
653 goto out; 659 goto out;
654 } 660 }
655 661
662 len = strlen(addr->sx25_addr.x25_addr);
663 for (i = 0; i < len; i++) {
664 if (!isdigit(addr->sx25_addr.x25_addr[i])) {
665 rc = -EINVAL;
666 goto out;
667 }
668 }
669
656 x25_sk(sk)->source_addr = addr->sx25_addr; 670 x25_sk(sk)->source_addr = addr->sx25_addr;
657 x25_insert_socket(sk); 671 x25_insert_socket(sk);
658 sock_reset_flag(sk, SOCK_ZAPPED); 672 sock_reset_flag(sk, SOCK_ZAPPED);
diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c
index 0a04e62e0e1..7ff37379232 100644
--- a/net/x25/x25_proc.c
+++ b/net/x25/x25_proc.c
@@ -25,49 +25,17 @@
25#include <net/x25.h> 25#include <net/x25.h>
26 26
27#ifdef CONFIG_PROC_FS 27#ifdef CONFIG_PROC_FS
28static __inline__ struct x25_route *x25_get_route_idx(loff_t pos)
29{
30 struct list_head *route_entry;
31 struct x25_route *rt = NULL;
32
33 list_for_each(route_entry, &x25_route_list) {
34 rt = list_entry(route_entry, struct x25_route, node);
35 if (!pos--)
36 goto found;
37 }
38 rt = NULL;
39found:
40 return rt;
41}
42 28
43static void *x25_seq_route_start(struct seq_file *seq, loff_t *pos) 29static void *x25_seq_route_start(struct seq_file *seq, loff_t *pos)
44 __acquires(x25_route_list_lock) 30 __acquires(x25_route_list_lock)
45{ 31{
46 loff_t l = *pos;
47
48 read_lock_bh(&x25_route_list_lock); 32 read_lock_bh(&x25_route_list_lock);
49 return l ? x25_get_route_idx(--l) : SEQ_START_TOKEN; 33 return seq_list_start_head(&x25_route_list, *pos);
50} 34}
51 35
52static void *x25_seq_route_next(struct seq_file *seq, void *v, loff_t *pos) 36static void *x25_seq_route_next(struct seq_file *seq, void *v, loff_t *pos)
53{ 37{
54 struct x25_route *rt; 38 return seq_list_next(v, &x25_route_list, pos);
55
56 ++*pos;
57 if (v == SEQ_START_TOKEN) {
58 rt = NULL;
59 if (!list_empty(&x25_route_list))
60 rt = list_entry(x25_route_list.next,
61 struct x25_route, node);
62 goto out;
63 }
64 rt = v;
65 if (rt->node.next != &x25_route_list)
66 rt = list_entry(rt->node.next, struct x25_route, node);
67 else
68 rt = NULL;
69out:
70 return rt;
71} 39}
72 40
73static void x25_seq_route_stop(struct seq_file *seq, void *v) 41static void x25_seq_route_stop(struct seq_file *seq, void *v)
@@ -78,9 +46,9 @@ static void x25_seq_route_stop(struct seq_file *seq, void *v)
78 46
79static int x25_seq_route_show(struct seq_file *seq, void *v) 47static int x25_seq_route_show(struct seq_file *seq, void *v)
80{ 48{
81 struct x25_route *rt; 49 struct x25_route *rt = list_entry(v, struct x25_route, node);
82 50
83 if (v == SEQ_START_TOKEN) { 51 if (v == &x25_route_list) {
84 seq_puts(seq, "Address Digits Device\n"); 52 seq_puts(seq, "Address Digits Device\n");
85 goto out; 53 goto out;
86 } 54 }
@@ -93,40 +61,16 @@ out:
93 return 0; 61 return 0;
94} 62}
95 63
96static __inline__ struct sock *x25_get_socket_idx(loff_t pos)
97{
98 struct sock *s;
99 struct hlist_node *node;
100
101 sk_for_each(s, node, &x25_list)
102 if (!pos--)
103 goto found;
104 s = NULL;
105found:
106 return s;
107}
108
109static void *x25_seq_socket_start(struct seq_file *seq, loff_t *pos) 64static void *x25_seq_socket_start(struct seq_file *seq, loff_t *pos)
110 __acquires(x25_list_lock) 65 __acquires(x25_list_lock)
111{ 66{
112 loff_t l = *pos;
113
114 read_lock_bh(&x25_list_lock); 67 read_lock_bh(&x25_list_lock);
115 return l ? x25_get_socket_idx(--l) : SEQ_START_TOKEN; 68 return seq_hlist_start_head(&x25_list, *pos);
116} 69}
117 70
118static void *x25_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos) 71static void *x25_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
119{ 72{
120 struct sock *s; 73 return seq_hlist_next(v, &x25_list, pos);
121
122 ++*pos;
123 if (v == SEQ_START_TOKEN) {
124 s = sk_head(&x25_list);
125 goto out;
126 }
127 s = sk_next(v);
128out:
129 return s;
130} 74}
131 75
132static void x25_seq_socket_stop(struct seq_file *seq, void *v) 76static void x25_seq_socket_stop(struct seq_file *seq, void *v)
@@ -148,7 +92,7 @@ static int x25_seq_socket_show(struct seq_file *seq, void *v)
148 goto out; 92 goto out;
149 } 93 }
150 94
151 s = v; 95 s = sk_entry(v);
152 x25 = x25_sk(s); 96 x25 = x25_sk(s);
153 97
154 if (!x25->neighbour || (dev = x25->neighbour->dev) == NULL) 98 if (!x25->neighbour || (dev = x25->neighbour->dev) == NULL)
@@ -170,51 +114,16 @@ out:
170 return 0; 114 return 0;
171} 115}
172 116
173static __inline__ struct x25_forward *x25_get_forward_idx(loff_t pos)
174{
175 struct x25_forward *f;
176 struct list_head *entry;
177
178 list_for_each(entry, &x25_forward_list) {
179 f = list_entry(entry, struct x25_forward, node);
180 if (!pos--)
181 goto found;
182 }
183
184 f = NULL;
185found:
186 return f;
187}
188
189static void *x25_seq_forward_start(struct seq_file *seq, loff_t *pos) 117static void *x25_seq_forward_start(struct seq_file *seq, loff_t *pos)
190 __acquires(x25_forward_list_lock) 118 __acquires(x25_forward_list_lock)
191{ 119{
192 loff_t l = *pos;
193
194 read_lock_bh(&x25_forward_list_lock); 120 read_lock_bh(&x25_forward_list_lock);
195 return l ? x25_get_forward_idx(--l) : SEQ_START_TOKEN; 121 return seq_list_start_head(&x25_forward_list, *pos);
196} 122}
197 123
198static void *x25_seq_forward_next(struct seq_file *seq, void *v, loff_t *pos) 124static void *x25_seq_forward_next(struct seq_file *seq, void *v, loff_t *pos)
199{ 125{
200 struct x25_forward *f; 126 return seq_list_next(v, &x25_forward_list, pos);
201
202 ++*pos;
203 if (v == SEQ_START_TOKEN) {
204 f = NULL;
205 if (!list_empty(&x25_forward_list))
206 f = list_entry(x25_forward_list.next,
207 struct x25_forward, node);
208 goto out;
209 }
210 f = v;
211 if (f->node.next != &x25_forward_list)
212 f = list_entry(f->node.next, struct x25_forward, node);
213 else
214 f = NULL;
215out:
216 return f;
217
218} 127}
219 128
220static void x25_seq_forward_stop(struct seq_file *seq, void *v) 129static void x25_seq_forward_stop(struct seq_file *seq, void *v)
@@ -225,9 +134,9 @@ static void x25_seq_forward_stop(struct seq_file *seq, void *v)
225 134
226static int x25_seq_forward_show(struct seq_file *seq, void *v) 135static int x25_seq_forward_show(struct seq_file *seq, void *v)
227{ 136{
228 struct x25_forward *f; 137 struct x25_forward *f = list_entry(v, struct x25_forward, node);
229 138
230 if (v == SEQ_START_TOKEN) { 139 if (v == &x25_forward_list) {
231 seq_printf(seq, "lci dev1 dev2\n"); 140 seq_printf(seq, "lci dev1 dev2\n");
232 goto out; 141 goto out;
233 } 142 }
@@ -236,7 +145,6 @@ static int x25_seq_forward_show(struct seq_file *seq, void *v)
236 145
237 seq_printf(seq, "%d %-10s %-10s\n", 146 seq_printf(seq, "%d %-10s %-10s\n",
238 f->lci, f->dev1->name, f->dev2->name); 147 f->lci, f->dev1->name, f->dev2->name);
239
240out: 148out:
241 return 0; 149 return 0;
242} 150}
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index e0009c17d80..45f1c98d4fc 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -152,7 +152,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
152 goto drop; 152 goto drop;
153 } 153 }
154 154
155 x = xfrm_state_lookup(net, daddr, spi, nexthdr, family); 155 x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family);
156 if (x == NULL) { 156 if (x == NULL) {
157 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES); 157 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
158 xfrm_audit_state_notfound(skb, family, spi, seq); 158 xfrm_audit_state_notfound(skb, family, spi, seq);
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index 42cd18391f4..0fc5ff66d1f 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -30,12 +30,12 @@
30 30
31struct ipcomp_tfms { 31struct ipcomp_tfms {
32 struct list_head list; 32 struct list_head list;
33 struct crypto_comp **tfms; 33 struct crypto_comp * __percpu *tfms;
34 int users; 34 int users;
35}; 35};
36 36
37static DEFINE_MUTEX(ipcomp_resource_mutex); 37static DEFINE_MUTEX(ipcomp_resource_mutex);
38static void **ipcomp_scratches; 38static void * __percpu *ipcomp_scratches;
39static int ipcomp_scratch_users; 39static int ipcomp_scratch_users;
40static LIST_HEAD(ipcomp_tfms_list); 40static LIST_HEAD(ipcomp_tfms_list);
41 41
@@ -200,7 +200,7 @@ EXPORT_SYMBOL_GPL(ipcomp_output);
200static void ipcomp_free_scratches(void) 200static void ipcomp_free_scratches(void)
201{ 201{
202 int i; 202 int i;
203 void **scratches; 203 void * __percpu *scratches;
204 204
205 if (--ipcomp_scratch_users) 205 if (--ipcomp_scratch_users)
206 return; 206 return;
@@ -215,10 +215,10 @@ static void ipcomp_free_scratches(void)
215 free_percpu(scratches); 215 free_percpu(scratches);
216} 216}
217 217
218static void **ipcomp_alloc_scratches(void) 218static void * __percpu *ipcomp_alloc_scratches(void)
219{ 219{
220 int i; 220 int i;
221 void **scratches; 221 void * __percpu *scratches;
222 222
223 if (ipcomp_scratch_users++) 223 if (ipcomp_scratch_users++)
224 return ipcomp_scratches; 224 return ipcomp_scratches;
@@ -239,7 +239,7 @@ static void **ipcomp_alloc_scratches(void)
239 return scratches; 239 return scratches;
240} 240}
241 241
242static void ipcomp_free_tfms(struct crypto_comp **tfms) 242static void ipcomp_free_tfms(struct crypto_comp * __percpu *tfms)
243{ 243{
244 struct ipcomp_tfms *pos; 244 struct ipcomp_tfms *pos;
245 int cpu; 245 int cpu;
@@ -267,10 +267,10 @@ static void ipcomp_free_tfms(struct crypto_comp **tfms)
267 free_percpu(tfms); 267 free_percpu(tfms);
268} 268}
269 269
270static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name) 270static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
271{ 271{
272 struct ipcomp_tfms *pos; 272 struct ipcomp_tfms *pos;
273 struct crypto_comp **tfms; 273 struct crypto_comp * __percpu *tfms;
274 int cpu; 274 int cpu;
275 275
276 /* This can be any valid CPU ID so we don't need locking. */ 276 /* This can be any valid CPU ID so we don't need locking. */
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 0ecb16a9a88..34a5ef8316e 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -556,6 +556,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
556 struct hlist_head *chain; 556 struct hlist_head *chain;
557 struct hlist_node *entry, *newpos; 557 struct hlist_node *entry, *newpos;
558 struct dst_entry *gc_list; 558 struct dst_entry *gc_list;
559 u32 mark = policy->mark.v & policy->mark.m;
559 560
560 write_lock_bh(&xfrm_policy_lock); 561 write_lock_bh(&xfrm_policy_lock);
561 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); 562 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
@@ -564,6 +565,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
564 hlist_for_each_entry(pol, entry, chain, bydst) { 565 hlist_for_each_entry(pol, entry, chain, bydst) {
565 if (pol->type == policy->type && 566 if (pol->type == policy->type &&
566 !selector_cmp(&pol->selector, &policy->selector) && 567 !selector_cmp(&pol->selector, &policy->selector) &&
568 (mark & pol->mark.m) == pol->mark.v &&
567 xfrm_sec_ctx_match(pol->security, policy->security) && 569 xfrm_sec_ctx_match(pol->security, policy->security) &&
568 !WARN_ON(delpol)) { 570 !WARN_ON(delpol)) {
569 if (excl) { 571 if (excl) {
@@ -635,8 +637,8 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
635} 637}
636EXPORT_SYMBOL(xfrm_policy_insert); 638EXPORT_SYMBOL(xfrm_policy_insert);
637 639
638struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir, 640struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
639 struct xfrm_selector *sel, 641 int dir, struct xfrm_selector *sel,
640 struct xfrm_sec_ctx *ctx, int delete, 642 struct xfrm_sec_ctx *ctx, int delete,
641 int *err) 643 int *err)
642{ 644{
@@ -650,6 +652,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
650 ret = NULL; 652 ret = NULL;
651 hlist_for_each_entry(pol, entry, chain, bydst) { 653 hlist_for_each_entry(pol, entry, chain, bydst) {
652 if (pol->type == type && 654 if (pol->type == type &&
655 (mark & pol->mark.m) == pol->mark.v &&
653 !selector_cmp(sel, &pol->selector) && 656 !selector_cmp(sel, &pol->selector) &&
654 xfrm_sec_ctx_match(ctx, pol->security)) { 657 xfrm_sec_ctx_match(ctx, pol->security)) {
655 xfrm_pol_hold(pol); 658 xfrm_pol_hold(pol);
@@ -676,8 +679,8 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
676} 679}
677EXPORT_SYMBOL(xfrm_policy_bysel_ctx); 680EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
678 681
679struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id, 682struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
680 int delete, int *err) 683 int dir, u32 id, int delete, int *err)
681{ 684{
682 struct xfrm_policy *pol, *ret; 685 struct xfrm_policy *pol, *ret;
683 struct hlist_head *chain; 686 struct hlist_head *chain;
@@ -692,7 +695,8 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id,
692 chain = net->xfrm.policy_byidx + idx_hash(net, id); 695 chain = net->xfrm.policy_byidx + idx_hash(net, id);
693 ret = NULL; 696 ret = NULL;
694 hlist_for_each_entry(pol, entry, chain, byidx) { 697 hlist_for_each_entry(pol, entry, chain, byidx) {
695 if (pol->type == type && pol->index == id) { 698 if (pol->type == type && pol->index == id &&
699 (mark & pol->mark.m) == pol->mark.v) {
696 xfrm_pol_hold(pol); 700 xfrm_pol_hold(pol);
697 if (delete) { 701 if (delete) {
698 *err = security_xfrm_policy_delete( 702 *err = security_xfrm_policy_delete(
@@ -771,7 +775,8 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
771 775
772int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) 776int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
773{ 777{
774 int dir, err = 0; 778 int dir, err = 0, cnt = 0;
779 struct xfrm_policy *dp;
775 780
776 write_lock_bh(&xfrm_policy_lock); 781 write_lock_bh(&xfrm_policy_lock);
777 782
@@ -789,8 +794,10 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
789 &net->xfrm.policy_inexact[dir], bydst) { 794 &net->xfrm.policy_inexact[dir], bydst) {
790 if (pol->type != type) 795 if (pol->type != type)
791 continue; 796 continue;
792 __xfrm_policy_unlink(pol, dir); 797 dp = __xfrm_policy_unlink(pol, dir);
793 write_unlock_bh(&xfrm_policy_lock); 798 write_unlock_bh(&xfrm_policy_lock);
799 if (dp)
800 cnt++;
794 801
795 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, 802 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
796 audit_info->sessionid, 803 audit_info->sessionid,
@@ -809,8 +816,10 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
809 bydst) { 816 bydst) {
810 if (pol->type != type) 817 if (pol->type != type)
811 continue; 818 continue;
812 __xfrm_policy_unlink(pol, dir); 819 dp = __xfrm_policy_unlink(pol, dir);
813 write_unlock_bh(&xfrm_policy_lock); 820 write_unlock_bh(&xfrm_policy_lock);
821 if (dp)
822 cnt++;
814 823
815 xfrm_audit_policy_delete(pol, 1, 824 xfrm_audit_policy_delete(pol, 1,
816 audit_info->loginuid, 825 audit_info->loginuid,
@@ -824,6 +833,8 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
824 } 833 }
825 834
826 } 835 }
836 if (!cnt)
837 err = -ESRCH;
827 atomic_inc(&flow_cache_genid); 838 atomic_inc(&flow_cache_genid);
828out: 839out:
829 write_unlock_bh(&xfrm_policy_lock); 840 write_unlock_bh(&xfrm_policy_lock);
@@ -909,6 +920,7 @@ static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
909 int match, ret = -ESRCH; 920 int match, ret = -ESRCH;
910 921
911 if (pol->family != family || 922 if (pol->family != family ||
923 (fl->mark & pol->mark.m) != pol->mark.v ||
912 pol->type != type) 924 pol->type != type)
913 return ret; 925 return ret;
914 926
@@ -1033,6 +1045,10 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struc
1033 int err = 0; 1045 int err = 0;
1034 1046
1035 if (match) { 1047 if (match) {
1048 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
1049 pol = NULL;
1050 goto out;
1051 }
1036 err = security_xfrm_policy_lookup(pol->security, 1052 err = security_xfrm_policy_lookup(pol->security,
1037 fl->secid, 1053 fl->secid,
1038 policy_to_flow_dir(dir)); 1054 policy_to_flow_dir(dir));
@@ -1045,6 +1061,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struc
1045 } else 1061 } else
1046 pol = NULL; 1062 pol = NULL;
1047 } 1063 }
1064out:
1048 read_unlock_bh(&xfrm_policy_lock); 1065 read_unlock_bh(&xfrm_policy_lock);
1049 return pol; 1066 return pol;
1050} 1067}
@@ -1137,6 +1154,7 @@ static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
1137 } 1154 }
1138 newp->lft = old->lft; 1155 newp->lft = old->lft;
1139 newp->curlft = old->curlft; 1156 newp->curlft = old->curlft;
1157 newp->mark = old->mark;
1140 newp->action = old->action; 1158 newp->action = old->action;
1141 newp->flags = old->flags; 1159 newp->flags = old->flags;
1142 newp->xfrm_nr = old->xfrm_nr; 1160 newp->xfrm_nr = old->xfrm_nr;
@@ -2045,8 +2063,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2045 int res; 2063 int res;
2046 2064
2047 if (xfrm_decode_session(skb, &fl, family) < 0) { 2065 if (xfrm_decode_session(skb, &fl, family) < 0) {
2048 /* XXX: we should have something like FWDHDRERROR here. */ 2066 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2049 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2050 return 0; 2067 return 0;
2051 } 2068 }
2052 2069
@@ -2421,19 +2438,19 @@ static int __net_init xfrm_statistics_init(struct net *net)
2421{ 2438{
2422 int rv; 2439 int rv;
2423 2440
2424 if (snmp_mib_init((void **)net->mib.xfrm_statistics, 2441 if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
2425 sizeof(struct linux_xfrm_mib)) < 0) 2442 sizeof(struct linux_xfrm_mib)) < 0)
2426 return -ENOMEM; 2443 return -ENOMEM;
2427 rv = xfrm_proc_init(net); 2444 rv = xfrm_proc_init(net);
2428 if (rv < 0) 2445 if (rv < 0)
2429 snmp_mib_free((void **)net->mib.xfrm_statistics); 2446 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2430 return rv; 2447 return rv;
2431} 2448}
2432 2449
2433static void xfrm_statistics_fini(struct net *net) 2450static void xfrm_statistics_fini(struct net *net)
2434{ 2451{
2435 xfrm_proc_fini(net); 2452 xfrm_proc_fini(net);
2436 snmp_mib_free((void **)net->mib.xfrm_statistics); 2453 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2437} 2454}
2438#else 2455#else
2439static int __net_init xfrm_statistics_init(struct net *net) 2456static int __net_init xfrm_statistics_init(struct net *net)
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index fef8db553e8..58d9ae00559 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -15,7 +15,7 @@
15#include <net/snmp.h> 15#include <net/snmp.h>
16#include <net/xfrm.h> 16#include <net/xfrm.h>
17 17
18static struct snmp_mib xfrm_mib_list[] = { 18static const struct snmp_mib xfrm_mib_list[] = {
19 SNMP_MIB_ITEM("XfrmInError", LINUX_MIB_XFRMINERROR), 19 SNMP_MIB_ITEM("XfrmInError", LINUX_MIB_XFRMINERROR),
20 SNMP_MIB_ITEM("XfrmInBufferError", LINUX_MIB_XFRMINBUFFERERROR), 20 SNMP_MIB_ITEM("XfrmInBufferError", LINUX_MIB_XFRMINBUFFERERROR),
21 SNMP_MIB_ITEM("XfrmInHdrError", LINUX_MIB_XFRMINHDRERROR), 21 SNMP_MIB_ITEM("XfrmInHdrError", LINUX_MIB_XFRMINHDRERROR),
@@ -41,6 +41,7 @@ static struct snmp_mib xfrm_mib_list[] = {
41 SNMP_MIB_ITEM("XfrmOutPolBlock", LINUX_MIB_XFRMOUTPOLBLOCK), 41 SNMP_MIB_ITEM("XfrmOutPolBlock", LINUX_MIB_XFRMOUTPOLBLOCK),
42 SNMP_MIB_ITEM("XfrmOutPolDead", LINUX_MIB_XFRMOUTPOLDEAD), 42 SNMP_MIB_ITEM("XfrmOutPolDead", LINUX_MIB_XFRMOUTPOLDEAD),
43 SNMP_MIB_ITEM("XfrmOutPolError", LINUX_MIB_XFRMOUTPOLERROR), 43 SNMP_MIB_ITEM("XfrmOutPolError", LINUX_MIB_XFRMOUTPOLERROR),
44 SNMP_MIB_ITEM("XfrmFwdHdrError", LINUX_MIB_XFRMFWDHDRERROR),
44 SNMP_MIB_SENTINEL 45 SNMP_MIB_SENTINEL
45}; 46};
46 47
@@ -50,7 +51,8 @@ static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
50 int i; 51 int i;
51 for (i=0; xfrm_mib_list[i].name; i++) 52 for (i=0; xfrm_mib_list[i].name; i++)
52 seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name, 53 seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name,
53 snmp_fold_field((void **)net->mib.xfrm_statistics, 54 snmp_fold_field((void __percpu **)
55 net->mib.xfrm_statistics,
54 xfrm_mib_list[i].entry)); 56 xfrm_mib_list[i].entry));
55 return 0; 57 return 0;
56} 58}
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index f445ea1c5f5..17d5b96f2fc 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -603,13 +603,14 @@ xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audi
603 603
604int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info) 604int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info)
605{ 605{
606 int i, err = 0; 606 int i, err = 0, cnt = 0;
607 607
608 spin_lock_bh(&xfrm_state_lock); 608 spin_lock_bh(&xfrm_state_lock);
609 err = xfrm_state_flush_secctx_check(net, proto, audit_info); 609 err = xfrm_state_flush_secctx_check(net, proto, audit_info);
610 if (err) 610 if (err)
611 goto out; 611 goto out;
612 612
613 err = -ESRCH;
613 for (i = 0; i <= net->xfrm.state_hmask; i++) { 614 for (i = 0; i <= net->xfrm.state_hmask; i++) {
614 struct hlist_node *entry; 615 struct hlist_node *entry;
615 struct xfrm_state *x; 616 struct xfrm_state *x;
@@ -626,13 +627,16 @@ restart:
626 audit_info->sessionid, 627 audit_info->sessionid,
627 audit_info->secid); 628 audit_info->secid);
628 xfrm_state_put(x); 629 xfrm_state_put(x);
630 if (!err)
631 cnt++;
629 632
630 spin_lock_bh(&xfrm_state_lock); 633 spin_lock_bh(&xfrm_state_lock);
631 goto restart; 634 goto restart;
632 } 635 }
633 } 636 }
634 } 637 }
635 err = 0; 638 if (cnt)
639 err = 0;
636 640
637out: 641out:
638 spin_unlock_bh(&xfrm_state_lock); 642 spin_unlock_bh(&xfrm_state_lock);
@@ -665,7 +669,7 @@ xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
665 return 0; 669 return 0;
666} 670}
667 671
668static struct xfrm_state *__xfrm_state_lookup(struct net *net, xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family) 672static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark, xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
669{ 673{
670 unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family); 674 unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
671 struct xfrm_state *x; 675 struct xfrm_state *x;
@@ -678,6 +682,8 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, xfrm_address_t *d
678 xfrm_addr_cmp(&x->id.daddr, daddr, family)) 682 xfrm_addr_cmp(&x->id.daddr, daddr, family))
679 continue; 683 continue;
680 684
685 if ((mark & x->mark.m) != x->mark.v)
686 continue;
681 xfrm_state_hold(x); 687 xfrm_state_hold(x);
682 return x; 688 return x;
683 } 689 }
@@ -685,7 +691,7 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, xfrm_address_t *d
685 return NULL; 691 return NULL;
686} 692}
687 693
688static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family) 694static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark, xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
689{ 695{
690 unsigned int h = xfrm_src_hash(net, daddr, saddr, family); 696 unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
691 struct xfrm_state *x; 697 struct xfrm_state *x;
@@ -698,6 +704,8 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, xfrm_addre
698 xfrm_addr_cmp(&x->props.saddr, saddr, family)) 704 xfrm_addr_cmp(&x->props.saddr, saddr, family))
699 continue; 705 continue;
700 706
707 if ((mark & x->mark.m) != x->mark.v)
708 continue;
701 xfrm_state_hold(x); 709 xfrm_state_hold(x);
702 return x; 710 return x;
703 } 711 }
@@ -709,12 +717,14 @@ static inline struct xfrm_state *
709__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family) 717__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
710{ 718{
711 struct net *net = xs_net(x); 719 struct net *net = xs_net(x);
720 u32 mark = x->mark.v & x->mark.m;
712 721
713 if (use_spi) 722 if (use_spi)
714 return __xfrm_state_lookup(net, &x->id.daddr, x->id.spi, 723 return __xfrm_state_lookup(net, mark, &x->id.daddr,
715 x->id.proto, family); 724 x->id.spi, x->id.proto, family);
716 else 725 else
717 return __xfrm_state_lookup_byaddr(net, &x->id.daddr, 726 return __xfrm_state_lookup_byaddr(net, mark,
727 &x->id.daddr,
718 &x->props.saddr, 728 &x->props.saddr,
719 x->id.proto, family); 729 x->id.proto, family);
720} 730}
@@ -779,6 +789,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
779 int acquire_in_progress = 0; 789 int acquire_in_progress = 0;
780 int error = 0; 790 int error = 0;
781 struct xfrm_state *best = NULL; 791 struct xfrm_state *best = NULL;
792 u32 mark = pol->mark.v & pol->mark.m;
782 793
783 to_put = NULL; 794 to_put = NULL;
784 795
@@ -787,6 +798,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
787 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 798 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
788 if (x->props.family == family && 799 if (x->props.family == family &&
789 x->props.reqid == tmpl->reqid && 800 x->props.reqid == tmpl->reqid &&
801 (mark & x->mark.m) == x->mark.v &&
790 !(x->props.flags & XFRM_STATE_WILDRECV) && 802 !(x->props.flags & XFRM_STATE_WILDRECV) &&
791 xfrm_state_addr_check(x, daddr, saddr, family) && 803 xfrm_state_addr_check(x, daddr, saddr, family) &&
792 tmpl->mode == x->props.mode && 804 tmpl->mode == x->props.mode &&
@@ -802,6 +814,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
802 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) { 814 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
803 if (x->props.family == family && 815 if (x->props.family == family &&
804 x->props.reqid == tmpl->reqid && 816 x->props.reqid == tmpl->reqid &&
817 (mark & x->mark.m) == x->mark.v &&
805 !(x->props.flags & XFRM_STATE_WILDRECV) && 818 !(x->props.flags & XFRM_STATE_WILDRECV) &&
806 xfrm_state_addr_check(x, daddr, saddr, family) && 819 xfrm_state_addr_check(x, daddr, saddr, family) &&
807 tmpl->mode == x->props.mode && 820 tmpl->mode == x->props.mode &&
@@ -815,7 +828,7 @@ found:
815 x = best; 828 x = best;
816 if (!x && !error && !acquire_in_progress) { 829 if (!x && !error && !acquire_in_progress) {
817 if (tmpl->id.spi && 830 if (tmpl->id.spi &&
818 (x0 = __xfrm_state_lookup(net, daddr, tmpl->id.spi, 831 (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
819 tmpl->id.proto, family)) != NULL) { 832 tmpl->id.proto, family)) != NULL) {
820 to_put = x0; 833 to_put = x0;
821 error = -EEXIST; 834 error = -EEXIST;
@@ -829,6 +842,7 @@ found:
829 /* Initialize temporary selector matching only 842 /* Initialize temporary selector matching only
830 * to current session. */ 843 * to current session. */
831 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family); 844 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
845 memcpy(&x->mark, &pol->mark, sizeof(x->mark));
832 846
833 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid); 847 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
834 if (error) { 848 if (error) {
@@ -871,7 +885,7 @@ out:
871} 885}
872 886
873struct xfrm_state * 887struct xfrm_state *
874xfrm_stateonly_find(struct net *net, 888xfrm_stateonly_find(struct net *net, u32 mark,
875 xfrm_address_t *daddr, xfrm_address_t *saddr, 889 xfrm_address_t *daddr, xfrm_address_t *saddr,
876 unsigned short family, u8 mode, u8 proto, u32 reqid) 890 unsigned short family, u8 mode, u8 proto, u32 reqid)
877{ 891{
@@ -884,6 +898,7 @@ xfrm_stateonly_find(struct net *net,
884 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 898 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
885 if (x->props.family == family && 899 if (x->props.family == family &&
886 x->props.reqid == reqid && 900 x->props.reqid == reqid &&
901 (mark & x->mark.m) == x->mark.v &&
887 !(x->props.flags & XFRM_STATE_WILDRECV) && 902 !(x->props.flags & XFRM_STATE_WILDRECV) &&
888 xfrm_state_addr_check(x, daddr, saddr, family) && 903 xfrm_state_addr_check(x, daddr, saddr, family) &&
889 mode == x->props.mode && 904 mode == x->props.mode &&
@@ -946,11 +961,13 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
946 struct xfrm_state *x; 961 struct xfrm_state *x;
947 struct hlist_node *entry; 962 struct hlist_node *entry;
948 unsigned int h; 963 unsigned int h;
964 u32 mark = xnew->mark.v & xnew->mark.m;
949 965
950 h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family); 966 h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
951 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 967 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
952 if (x->props.family == family && 968 if (x->props.family == family &&
953 x->props.reqid == reqid && 969 x->props.reqid == reqid &&
970 (mark & x->mark.m) == x->mark.v &&
954 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) && 971 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
955 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family)) 972 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
956 x->genid = xfrm_state_genid; 973 x->genid = xfrm_state_genid;
@@ -967,11 +984,12 @@ void xfrm_state_insert(struct xfrm_state *x)
967EXPORT_SYMBOL(xfrm_state_insert); 984EXPORT_SYMBOL(xfrm_state_insert);
968 985
969/* xfrm_state_lock is held */ 986/* xfrm_state_lock is held */
970static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create) 987static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m, unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
971{ 988{
972 unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family); 989 unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
973 struct hlist_node *entry; 990 struct hlist_node *entry;
974 struct xfrm_state *x; 991 struct xfrm_state *x;
992 u32 mark = m->v & m->m;
975 993
976 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 994 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
977 if (x->props.reqid != reqid || 995 if (x->props.reqid != reqid ||
@@ -980,6 +998,7 @@ static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family
980 x->km.state != XFRM_STATE_ACQ || 998 x->km.state != XFRM_STATE_ACQ ||
981 x->id.spi != 0 || 999 x->id.spi != 0 ||
982 x->id.proto != proto || 1000 x->id.proto != proto ||
1001 (mark & x->mark.m) != x->mark.v ||
983 xfrm_addr_cmp(&x->id.daddr, daddr, family) || 1002 xfrm_addr_cmp(&x->id.daddr, daddr, family) ||
984 xfrm_addr_cmp(&x->props.saddr, saddr, family)) 1003 xfrm_addr_cmp(&x->props.saddr, saddr, family))
985 continue; 1004 continue;
@@ -1022,6 +1041,8 @@ static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family
1022 x->props.family = family; 1041 x->props.family = family;
1023 x->props.mode = mode; 1042 x->props.mode = mode;
1024 x->props.reqid = reqid; 1043 x->props.reqid = reqid;
1044 x->mark.v = m->v;
1045 x->mark.m = m->m;
1025 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; 1046 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1026 xfrm_state_hold(x); 1047 xfrm_state_hold(x);
1027 tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL); 1048 tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
@@ -1038,7 +1059,7 @@ static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family
1038 return x; 1059 return x;
1039} 1060}
1040 1061
1041static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 seq); 1062static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
1042 1063
1043int xfrm_state_add(struct xfrm_state *x) 1064int xfrm_state_add(struct xfrm_state *x)
1044{ 1065{
@@ -1046,6 +1067,7 @@ int xfrm_state_add(struct xfrm_state *x)
1046 struct xfrm_state *x1, *to_put; 1067 struct xfrm_state *x1, *to_put;
1047 int family; 1068 int family;
1048 int err; 1069 int err;
1070 u32 mark = x->mark.v & x->mark.m;
1049 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY); 1071 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1050 1072
1051 family = x->props.family; 1073 family = x->props.family;
@@ -1063,7 +1085,7 @@ int xfrm_state_add(struct xfrm_state *x)
1063 } 1085 }
1064 1086
1065 if (use_spi && x->km.seq) { 1087 if (use_spi && x->km.seq) {
1066 x1 = __xfrm_find_acq_byseq(net, x->km.seq); 1088 x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq);
1067 if (x1 && ((x1->id.proto != x->id.proto) || 1089 if (x1 && ((x1->id.proto != x->id.proto) ||
1068 xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) { 1090 xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
1069 to_put = x1; 1091 to_put = x1;
@@ -1072,8 +1094,8 @@ int xfrm_state_add(struct xfrm_state *x)
1072 } 1094 }
1073 1095
1074 if (use_spi && !x1) 1096 if (use_spi && !x1)
1075 x1 = __find_acq_core(net, family, x->props.mode, x->props.reqid, 1097 x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
1076 x->id.proto, 1098 x->props.reqid, x->id.proto,
1077 &x->id.daddr, &x->props.saddr, 0); 1099 &x->id.daddr, &x->props.saddr, 0);
1078 1100
1079 __xfrm_state_bump_genids(x); 1101 __xfrm_state_bump_genids(x);
@@ -1147,6 +1169,8 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
1147 goto error; 1169 goto error;
1148 } 1170 }
1149 1171
1172 memcpy(&x->mark, &orig->mark, sizeof(x->mark));
1173
1150 err = xfrm_init_state(x); 1174 err = xfrm_init_state(x);
1151 if (err) 1175 if (err)
1152 goto error; 1176 goto error;
@@ -1338,41 +1362,41 @@ int xfrm_state_check_expire(struct xfrm_state *x)
1338EXPORT_SYMBOL(xfrm_state_check_expire); 1362EXPORT_SYMBOL(xfrm_state_check_expire);
1339 1363
1340struct xfrm_state * 1364struct xfrm_state *
1341xfrm_state_lookup(struct net *net, xfrm_address_t *daddr, __be32 spi, u8 proto, 1365xfrm_state_lookup(struct net *net, u32 mark, xfrm_address_t *daddr, __be32 spi,
1342 unsigned short family) 1366 u8 proto, unsigned short family)
1343{ 1367{
1344 struct xfrm_state *x; 1368 struct xfrm_state *x;
1345 1369
1346 spin_lock_bh(&xfrm_state_lock); 1370 spin_lock_bh(&xfrm_state_lock);
1347 x = __xfrm_state_lookup(net, daddr, spi, proto, family); 1371 x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
1348 spin_unlock_bh(&xfrm_state_lock); 1372 spin_unlock_bh(&xfrm_state_lock);
1349 return x; 1373 return x;
1350} 1374}
1351EXPORT_SYMBOL(xfrm_state_lookup); 1375EXPORT_SYMBOL(xfrm_state_lookup);
1352 1376
1353struct xfrm_state * 1377struct xfrm_state *
1354xfrm_state_lookup_byaddr(struct net *net, 1378xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1355 xfrm_address_t *daddr, xfrm_address_t *saddr, 1379 xfrm_address_t *daddr, xfrm_address_t *saddr,
1356 u8 proto, unsigned short family) 1380 u8 proto, unsigned short family)
1357{ 1381{
1358 struct xfrm_state *x; 1382 struct xfrm_state *x;
1359 1383
1360 spin_lock_bh(&xfrm_state_lock); 1384 spin_lock_bh(&xfrm_state_lock);
1361 x = __xfrm_state_lookup_byaddr(net, daddr, saddr, proto, family); 1385 x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
1362 spin_unlock_bh(&xfrm_state_lock); 1386 spin_unlock_bh(&xfrm_state_lock);
1363 return x; 1387 return x;
1364} 1388}
1365EXPORT_SYMBOL(xfrm_state_lookup_byaddr); 1389EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1366 1390
1367struct xfrm_state * 1391struct xfrm_state *
1368xfrm_find_acq(struct net *net, u8 mode, u32 reqid, u8 proto, 1392xfrm_find_acq(struct net *net, struct xfrm_mark *mark, u8 mode, u32 reqid, u8 proto,
1369 xfrm_address_t *daddr, xfrm_address_t *saddr, 1393 xfrm_address_t *daddr, xfrm_address_t *saddr,
1370 int create, unsigned short family) 1394 int create, unsigned short family)
1371{ 1395{
1372 struct xfrm_state *x; 1396 struct xfrm_state *x;
1373 1397
1374 spin_lock_bh(&xfrm_state_lock); 1398 spin_lock_bh(&xfrm_state_lock);
1375 x = __find_acq_core(net, family, mode, reqid, proto, daddr, saddr, create); 1399 x = __find_acq_core(net, mark, family, mode, reqid, proto, daddr, saddr, create);
1376 spin_unlock_bh(&xfrm_state_lock); 1400 spin_unlock_bh(&xfrm_state_lock);
1377 1401
1378 return x; 1402 return x;
@@ -1419,7 +1443,7 @@ EXPORT_SYMBOL(xfrm_state_sort);
1419 1443
1420/* Silly enough, but I'm lazy to build resolution list */ 1444/* Silly enough, but I'm lazy to build resolution list */
1421 1445
1422static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 seq) 1446static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1423{ 1447{
1424 int i; 1448 int i;
1425 1449
@@ -1429,6 +1453,7 @@ static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 seq)
1429 1453
1430 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) { 1454 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) {
1431 if (x->km.seq == seq && 1455 if (x->km.seq == seq &&
1456 (mark & x->mark.m) == x->mark.v &&
1432 x->km.state == XFRM_STATE_ACQ) { 1457 x->km.state == XFRM_STATE_ACQ) {
1433 xfrm_state_hold(x); 1458 xfrm_state_hold(x);
1434 return x; 1459 return x;
@@ -1438,12 +1463,12 @@ static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 seq)
1438 return NULL; 1463 return NULL;
1439} 1464}
1440 1465
1441struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 seq) 1466struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1442{ 1467{
1443 struct xfrm_state *x; 1468 struct xfrm_state *x;
1444 1469
1445 spin_lock_bh(&xfrm_state_lock); 1470 spin_lock_bh(&xfrm_state_lock);
1446 x = __xfrm_find_acq_byseq(net, seq); 1471 x = __xfrm_find_acq_byseq(net, mark, seq);
1447 spin_unlock_bh(&xfrm_state_lock); 1472 spin_unlock_bh(&xfrm_state_lock);
1448 return x; 1473 return x;
1449} 1474}
@@ -1452,12 +1477,12 @@ EXPORT_SYMBOL(xfrm_find_acq_byseq);
1452u32 xfrm_get_acqseq(void) 1477u32 xfrm_get_acqseq(void)
1453{ 1478{
1454 u32 res; 1479 u32 res;
1455 static u32 acqseq; 1480 static atomic_t acqseq;
1456 static DEFINE_SPINLOCK(acqseq_lock); 1481
1482 do {
1483 res = atomic_inc_return(&acqseq);
1484 } while (!res);
1457 1485
1458 spin_lock_bh(&acqseq_lock);
1459 res = (++acqseq ? : ++acqseq);
1460 spin_unlock_bh(&acqseq_lock);
1461 return res; 1486 return res;
1462} 1487}
1463EXPORT_SYMBOL(xfrm_get_acqseq); 1488EXPORT_SYMBOL(xfrm_get_acqseq);
@@ -1470,6 +1495,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1470 int err = -ENOENT; 1495 int err = -ENOENT;
1471 __be32 minspi = htonl(low); 1496 __be32 minspi = htonl(low);
1472 __be32 maxspi = htonl(high); 1497 __be32 maxspi = htonl(high);
1498 u32 mark = x->mark.v & x->mark.m;
1473 1499
1474 spin_lock_bh(&x->lock); 1500 spin_lock_bh(&x->lock);
1475 if (x->km.state == XFRM_STATE_DEAD) 1501 if (x->km.state == XFRM_STATE_DEAD)
@@ -1482,7 +1508,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1482 err = -ENOENT; 1508 err = -ENOENT;
1483 1509
1484 if (minspi == maxspi) { 1510 if (minspi == maxspi) {
1485 x0 = xfrm_state_lookup(net, &x->id.daddr, minspi, x->id.proto, x->props.family); 1511 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
1486 if (x0) { 1512 if (x0) {
1487 xfrm_state_put(x0); 1513 xfrm_state_put(x0);
1488 goto unlock; 1514 goto unlock;
@@ -1492,7 +1518,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1492 u32 spi = 0; 1518 u32 spi = 0;
1493 for (h=0; h<high-low+1; h++) { 1519 for (h=0; h<high-low+1; h++) {
1494 spi = low + net_random()%(high-low+1); 1520 spi = low + net_random()%(high-low+1);
1495 x0 = xfrm_state_lookup(net, &x->id.daddr, htonl(spi), x->id.proto, x->props.family); 1521 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
1496 if (x0 == NULL) { 1522 if (x0 == NULL) {
1497 x->id.spi = htonl(spi); 1523 x->id.spi = htonl(spi);
1498 break; 1524 break;
diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
index 2e221f2cad7..2c4d6cdcba4 100644
--- a/net/xfrm/xfrm_sysctl.c
+++ b/net/xfrm/xfrm_sysctl.c
@@ -2,7 +2,7 @@
2#include <net/net_namespace.h> 2#include <net/net_namespace.h>
3#include <net/xfrm.h> 3#include <net/xfrm.h>
4 4
5static void __xfrm_sysctl_init(struct net *net) 5static void __net_init __xfrm_sysctl_init(struct net *net)
6{ 6{
7 net->xfrm.sysctl_aevent_etime = XFRM_AE_ETIME; 7 net->xfrm.sysctl_aevent_etime = XFRM_AE_ETIME;
8 net->xfrm.sysctl_aevent_rseqth = XFRM_AE_SEQT_SIZE; 8 net->xfrm.sysctl_aevent_rseqth = XFRM_AE_SEQT_SIZE;
@@ -64,7 +64,7 @@ out_kmemdup:
64 return -ENOMEM; 64 return -ENOMEM;
65} 65}
66 66
67void xfrm_sysctl_fini(struct net *net) 67void __net_exit xfrm_sysctl_fini(struct net *net)
68{ 68{
69 struct ctl_table *table; 69 struct ctl_table *table;
70 70
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d5a71297600..6106b72826d 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -446,6 +446,8 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
446 goto error; 446 goto error;
447 } 447 }
448 448
449 xfrm_mark_get(attrs, &x->mark);
450
449 err = xfrm_init_state(x); 451 err = xfrm_init_state(x);
450 if (err) 452 if (err)
451 goto error; 453 goto error;
@@ -526,11 +528,13 @@ static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
526 int *errp) 528 int *errp)
527{ 529{
528 struct xfrm_state *x = NULL; 530 struct xfrm_state *x = NULL;
531 struct xfrm_mark m;
529 int err; 532 int err;
533 u32 mark = xfrm_mark_get(attrs, &m);
530 534
531 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) { 535 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
532 err = -ESRCH; 536 err = -ESRCH;
533 x = xfrm_state_lookup(net, &p->daddr, p->spi, p->proto, p->family); 537 x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
534 } else { 538 } else {
535 xfrm_address_t *saddr = NULL; 539 xfrm_address_t *saddr = NULL;
536 540
@@ -541,7 +545,8 @@ static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
541 } 545 }
542 546
543 err = -ESRCH; 547 err = -ESRCH;
544 x = xfrm_state_lookup_byaddr(net, &p->daddr, saddr, 548 x = xfrm_state_lookup_byaddr(net, mark,
549 &p->daddr, saddr,
545 p->proto, p->family); 550 p->proto, p->family);
546 } 551 }
547 552
@@ -683,6 +688,9 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
683 if (x->encap) 688 if (x->encap)
684 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap); 689 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
685 690
691 if (xfrm_mark_put(skb, &x->mark))
692 goto nla_put_failure;
693
686 if (x->security && copy_sec_ctx(x->security, skb) < 0) 694 if (x->security && copy_sec_ctx(x->security, skb) < 0)
687 goto nla_put_failure; 695 goto nla_put_failure;
688 696
@@ -947,6 +955,8 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
947 xfrm_address_t *daddr; 955 xfrm_address_t *daddr;
948 int family; 956 int family;
949 int err; 957 int err;
958 u32 mark;
959 struct xfrm_mark m;
950 960
951 p = nlmsg_data(nlh); 961 p = nlmsg_data(nlh);
952 err = verify_userspi_info(p); 962 err = verify_userspi_info(p);
@@ -957,8 +967,10 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
957 daddr = &p->info.id.daddr; 967 daddr = &p->info.id.daddr;
958 968
959 x = NULL; 969 x = NULL;
970
971 mark = xfrm_mark_get(attrs, &m);
960 if (p->info.seq) { 972 if (p->info.seq) {
961 x = xfrm_find_acq_byseq(net, p->info.seq); 973 x = xfrm_find_acq_byseq(net, mark, p->info.seq);
962 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) { 974 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {
963 xfrm_state_put(x); 975 xfrm_state_put(x);
964 x = NULL; 976 x = NULL;
@@ -966,7 +978,7 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
966 } 978 }
967 979
968 if (!x) 980 if (!x)
969 x = xfrm_find_acq(net, p->info.mode, p->info.reqid, 981 x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
970 p->info.id.proto, daddr, 982 p->info.id.proto, daddr,
971 &p->info.saddr, 1, 983 &p->info.saddr, 1,
972 family); 984 family);
@@ -1220,6 +1232,8 @@ static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_us
1220 if (err) 1232 if (err)
1221 goto error; 1233 goto error;
1222 1234
1235 xfrm_mark_get(attrs, &xp->mark);
1236
1223 return xp; 1237 return xp;
1224 error: 1238 error:
1225 *errp = err; 1239 *errp = err;
@@ -1366,10 +1380,13 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
1366 goto nlmsg_failure; 1380 goto nlmsg_failure;
1367 if (copy_to_user_policy_type(xp->type, skb) < 0) 1381 if (copy_to_user_policy_type(xp->type, skb) < 0)
1368 goto nlmsg_failure; 1382 goto nlmsg_failure;
1383 if (xfrm_mark_put(skb, &xp->mark))
1384 goto nla_put_failure;
1369 1385
1370 nlmsg_end(skb, nlh); 1386 nlmsg_end(skb, nlh);
1371 return 0; 1387 return 0;
1372 1388
1389nla_put_failure:
1373nlmsg_failure: 1390nlmsg_failure:
1374 nlmsg_cancel(skb, nlh); 1391 nlmsg_cancel(skb, nlh);
1375 return -EMSGSIZE; 1392 return -EMSGSIZE;
@@ -1441,6 +1458,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1441 int err; 1458 int err;
1442 struct km_event c; 1459 struct km_event c;
1443 int delete; 1460 int delete;
1461 struct xfrm_mark m;
1462 u32 mark = xfrm_mark_get(attrs, &m);
1444 1463
1445 p = nlmsg_data(nlh); 1464 p = nlmsg_data(nlh);
1446 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY; 1465 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
@@ -1454,7 +1473,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1454 return err; 1473 return err;
1455 1474
1456 if (p->index) 1475 if (p->index)
1457 xp = xfrm_policy_byid(net, type, p->dir, p->index, delete, &err); 1476 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, delete, &err);
1458 else { 1477 else {
1459 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; 1478 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1460 struct xfrm_sec_ctx *ctx; 1479 struct xfrm_sec_ctx *ctx;
@@ -1471,8 +1490,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1471 if (err) 1490 if (err)
1472 return err; 1491 return err;
1473 } 1492 }
1474 xp = xfrm_policy_bysel_ctx(net, type, p->dir, &p->sel, ctx, 1493 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, &p->sel,
1475 delete, &err); 1494 ctx, delete, &err);
1476 security_xfrm_policy_free(ctx); 1495 security_xfrm_policy_free(ctx);
1477 } 1496 }
1478 if (xp == NULL) 1497 if (xp == NULL)
@@ -1524,8 +1543,11 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1524 audit_info.sessionid = NETLINK_CB(skb).sessionid; 1543 audit_info.sessionid = NETLINK_CB(skb).sessionid;
1525 audit_info.secid = NETLINK_CB(skb).sid; 1544 audit_info.secid = NETLINK_CB(skb).sid;
1526 err = xfrm_state_flush(net, p->proto, &audit_info); 1545 err = xfrm_state_flush(net, p->proto, &audit_info);
1527 if (err) 1546 if (err) {
1547 if (err == -ESRCH) /* empty table */
1548 return 0;
1528 return err; 1549 return err;
1550 }
1529 c.data.proto = p->proto; 1551 c.data.proto = p->proto;
1530 c.event = nlh->nlmsg_type; 1552 c.event = nlh->nlmsg_type;
1531 c.seq = nlh->nlmsg_seq; 1553 c.seq = nlh->nlmsg_seq;
@@ -1541,6 +1563,7 @@ static inline size_t xfrm_aevent_msgsize(void)
1541 return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id)) 1563 return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
1542 + nla_total_size(sizeof(struct xfrm_replay_state)) 1564 + nla_total_size(sizeof(struct xfrm_replay_state))
1543 + nla_total_size(sizeof(struct xfrm_lifetime_cur)) 1565 + nla_total_size(sizeof(struct xfrm_lifetime_cur))
1566 + nla_total_size(sizeof(struct xfrm_mark))
1544 + nla_total_size(4) /* XFRM_AE_RTHR */ 1567 + nla_total_size(4) /* XFRM_AE_RTHR */
1545 + nla_total_size(4); /* XFRM_AE_ETHR */ 1568 + nla_total_size(4); /* XFRM_AE_ETHR */
1546} 1569}
@@ -1573,6 +1596,9 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_eve
1573 NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH, 1596 NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH,
1574 x->replay_maxage * 10 / HZ); 1597 x->replay_maxage * 10 / HZ);
1575 1598
1599 if (xfrm_mark_put(skb, &x->mark))
1600 goto nla_put_failure;
1601
1576 return nlmsg_end(skb, nlh); 1602 return nlmsg_end(skb, nlh);
1577 1603
1578nla_put_failure: 1604nla_put_failure:
@@ -1588,6 +1614,8 @@ static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1588 struct sk_buff *r_skb; 1614 struct sk_buff *r_skb;
1589 int err; 1615 int err;
1590 struct km_event c; 1616 struct km_event c;
1617 u32 mark;
1618 struct xfrm_mark m;
1591 struct xfrm_aevent_id *p = nlmsg_data(nlh); 1619 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1592 struct xfrm_usersa_id *id = &p->sa_id; 1620 struct xfrm_usersa_id *id = &p->sa_id;
1593 1621
@@ -1595,7 +1623,9 @@ static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1595 if (r_skb == NULL) 1623 if (r_skb == NULL)
1596 return -ENOMEM; 1624 return -ENOMEM;
1597 1625
1598 x = xfrm_state_lookup(net, &id->daddr, id->spi, id->proto, id->family); 1626 mark = xfrm_mark_get(attrs, &m);
1627
1628 x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
1599 if (x == NULL) { 1629 if (x == NULL) {
1600 kfree_skb(r_skb); 1630 kfree_skb(r_skb);
1601 return -ESRCH; 1631 return -ESRCH;
@@ -1626,6 +1656,8 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1626 struct xfrm_state *x; 1656 struct xfrm_state *x;
1627 struct km_event c; 1657 struct km_event c;
1628 int err = - EINVAL; 1658 int err = - EINVAL;
1659 u32 mark = 0;
1660 struct xfrm_mark m;
1629 struct xfrm_aevent_id *p = nlmsg_data(nlh); 1661 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1630 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; 1662 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
1631 struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; 1663 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
@@ -1637,7 +1669,9 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1637 if (!(nlh->nlmsg_flags&NLM_F_REPLACE)) 1669 if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
1638 return err; 1670 return err;
1639 1671
1640 x = xfrm_state_lookup(net, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family); 1672 mark = xfrm_mark_get(attrs, &m);
1673
1674 x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
1641 if (x == NULL) 1675 if (x == NULL)
1642 return -ESRCH; 1676 return -ESRCH;
1643 1677
@@ -1676,8 +1710,12 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1676 audit_info.sessionid = NETLINK_CB(skb).sessionid; 1710 audit_info.sessionid = NETLINK_CB(skb).sessionid;
1677 audit_info.secid = NETLINK_CB(skb).sid; 1711 audit_info.secid = NETLINK_CB(skb).sid;
1678 err = xfrm_policy_flush(net, type, &audit_info); 1712 err = xfrm_policy_flush(net, type, &audit_info);
1679 if (err) 1713 if (err) {
1714 if (err == -ESRCH) /* empty table */
1715 return 0;
1680 return err; 1716 return err;
1717 }
1718
1681 c.data.type = type; 1719 c.data.type = type;
1682 c.event = nlh->nlmsg_type; 1720 c.event = nlh->nlmsg_type;
1683 c.seq = nlh->nlmsg_seq; 1721 c.seq = nlh->nlmsg_seq;
@@ -1696,13 +1734,15 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1696 struct xfrm_userpolicy_info *p = &up->pol; 1734 struct xfrm_userpolicy_info *p = &up->pol;
1697 u8 type = XFRM_POLICY_TYPE_MAIN; 1735 u8 type = XFRM_POLICY_TYPE_MAIN;
1698 int err = -ENOENT; 1736 int err = -ENOENT;
1737 struct xfrm_mark m;
1738 u32 mark = xfrm_mark_get(attrs, &m);
1699 1739
1700 err = copy_from_user_policy_type(&type, attrs); 1740 err = copy_from_user_policy_type(&type, attrs);
1701 if (err) 1741 if (err)
1702 return err; 1742 return err;
1703 1743
1704 if (p->index) 1744 if (p->index)
1705 xp = xfrm_policy_byid(net, type, p->dir, p->index, 0, &err); 1745 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
1706 else { 1746 else {
1707 struct nlattr *rt = attrs[XFRMA_SEC_CTX]; 1747 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1708 struct xfrm_sec_ctx *ctx; 1748 struct xfrm_sec_ctx *ctx;
@@ -1719,7 +1759,8 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1719 if (err) 1759 if (err)
1720 return err; 1760 return err;
1721 } 1761 }
1722 xp = xfrm_policy_bysel_ctx(net, type, p->dir, &p->sel, ctx, 0, &err); 1762 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir,
1763 &p->sel, ctx, 0, &err);
1723 security_xfrm_policy_free(ctx); 1764 security_xfrm_policy_free(ctx);
1724 } 1765 }
1725 if (xp == NULL) 1766 if (xp == NULL)
@@ -1759,8 +1800,10 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1759 int err; 1800 int err;
1760 struct xfrm_user_expire *ue = nlmsg_data(nlh); 1801 struct xfrm_user_expire *ue = nlmsg_data(nlh);
1761 struct xfrm_usersa_info *p = &ue->state; 1802 struct xfrm_usersa_info *p = &ue->state;
1803 struct xfrm_mark m;
1804 u32 mark = xfrm_mark_get(attrs, &m);;
1762 1805
1763 x = xfrm_state_lookup(net, &p->id.daddr, p->id.spi, p->id.proto, p->family); 1806 x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
1764 1807
1765 err = -ENOENT; 1808 err = -ENOENT;
1766 if (x == NULL) 1809 if (x == NULL)
@@ -1794,6 +1837,7 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
1794 struct xfrm_user_tmpl *ut; 1837 struct xfrm_user_tmpl *ut;
1795 int i; 1838 int i;
1796 struct nlattr *rt = attrs[XFRMA_TMPL]; 1839 struct nlattr *rt = attrs[XFRMA_TMPL];
1840 struct xfrm_mark mark;
1797 1841
1798 struct xfrm_user_acquire *ua = nlmsg_data(nlh); 1842 struct xfrm_user_acquire *ua = nlmsg_data(nlh);
1799 struct xfrm_state *x = xfrm_state_alloc(net); 1843 struct xfrm_state *x = xfrm_state_alloc(net);
@@ -1802,6 +1846,8 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
1802 if (!x) 1846 if (!x)
1803 goto nomem; 1847 goto nomem;
1804 1848
1849 xfrm_mark_get(attrs, &mark);
1850
1805 err = verify_newpolicy_info(&ua->policy); 1851 err = verify_newpolicy_info(&ua->policy);
1806 if (err) 1852 if (err)
1807 goto bad_policy; 1853 goto bad_policy;
@@ -1814,7 +1860,8 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
1814 memcpy(&x->id, &ua->id, sizeof(ua->id)); 1860 memcpy(&x->id, &ua->id, sizeof(ua->id));
1815 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr)); 1861 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
1816 memcpy(&x->sel, &ua->sel, sizeof(ua->sel)); 1862 memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
1817 1863 xp->mark.m = x->mark.m = mark.m;
1864 xp->mark.v = x->mark.v = mark.v;
1818 ut = nla_data(rt); 1865 ut = nla_data(rt);
1819 /* extract the templates and for each call km_key */ 1866 /* extract the templates and for each call km_key */
1820 for (i = 0; i < xp->xfrm_nr; i++, ut++) { 1867 for (i = 0; i < xp->xfrm_nr; i++, ut++) {
@@ -2054,6 +2101,10 @@ static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
2054#undef XMSGSIZE 2101#undef XMSGSIZE
2055 2102
2056static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { 2103static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
2104 [XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)},
2105 [XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)},
2106 [XFRMA_LASTUSED] = { .type = NLA_U64},
2107 [XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
2057 [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) }, 2108 [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
2058 [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) }, 2109 [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
2059 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) }, 2110 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
@@ -2070,6 +2121,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
2070 [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)}, 2121 [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
2071 [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) }, 2122 [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
2072 [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) }, 2123 [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
2124 [XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
2073}; 2125};
2074 2126
2075static struct xfrm_link { 2127static struct xfrm_link {
@@ -2149,7 +2201,8 @@ static void xfrm_netlink_rcv(struct sk_buff *skb)
2149 2201
2150static inline size_t xfrm_expire_msgsize(void) 2202static inline size_t xfrm_expire_msgsize(void)
2151{ 2203{
2152 return NLMSG_ALIGN(sizeof(struct xfrm_user_expire)); 2204 return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
2205 + nla_total_size(sizeof(struct xfrm_mark));
2153} 2206}
2154 2207
2155static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c) 2208static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
@@ -2165,7 +2218,13 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_eve
2165 copy_to_user_state(x, &ue->state); 2218 copy_to_user_state(x, &ue->state);
2166 ue->hard = (c->data.hard != 0) ? 1 : 0; 2219 ue->hard = (c->data.hard != 0) ? 1 : 0;
2167 2220
2221 if (xfrm_mark_put(skb, &x->mark))
2222 goto nla_put_failure;
2223
2168 return nlmsg_end(skb, nlh); 2224 return nlmsg_end(skb, nlh);
2225
2226nla_put_failure:
2227 return -EMSGSIZE;
2169} 2228}
2170 2229
2171static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c) 2230static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
@@ -2177,8 +2236,10 @@ static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
2177 if (skb == NULL) 2236 if (skb == NULL)
2178 return -ENOMEM; 2237 return -ENOMEM;
2179 2238
2180 if (build_expire(skb, x, c) < 0) 2239 if (build_expire(skb, x, c) < 0) {
2181 BUG(); 2240 kfree_skb(skb);
2241 return -EMSGSIZE;
2242 }
2182 2243
2183 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC); 2244 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2184} 2245}
@@ -2266,6 +2327,7 @@ static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c)
2266 if (c->event == XFRM_MSG_DELSA) { 2327 if (c->event == XFRM_MSG_DELSA) {
2267 len += nla_total_size(headlen); 2328 len += nla_total_size(headlen);
2268 headlen = sizeof(*id); 2329 headlen = sizeof(*id);
2330 len += nla_total_size(sizeof(struct xfrm_mark));
2269 } 2331 }
2270 len += NLMSG_ALIGN(headlen); 2332 len += NLMSG_ALIGN(headlen);
2271 2333
@@ -2336,6 +2398,7 @@ static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
2336{ 2398{
2337 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire)) 2399 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
2338 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr) 2400 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2401 + nla_total_size(sizeof(struct xfrm_mark))
2339 + nla_total_size(xfrm_user_sec_ctx_size(x->security)) 2402 + nla_total_size(xfrm_user_sec_ctx_size(x->security))
2340 + userpolicy_type_attrsize(); 2403 + userpolicy_type_attrsize();
2341} 2404}
@@ -2368,9 +2431,12 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2368 goto nlmsg_failure; 2431 goto nlmsg_failure;
2369 if (copy_to_user_policy_type(xp->type, skb) < 0) 2432 if (copy_to_user_policy_type(xp->type, skb) < 0)
2370 goto nlmsg_failure; 2433 goto nlmsg_failure;
2434 if (xfrm_mark_put(skb, &xp->mark))
2435 goto nla_put_failure;
2371 2436
2372 return nlmsg_end(skb, nlh); 2437 return nlmsg_end(skb, nlh);
2373 2438
2439nla_put_failure:
2374nlmsg_failure: 2440nlmsg_failure:
2375 nlmsg_cancel(skb, nlh); 2441 nlmsg_cancel(skb, nlh);
2376 return -EMSGSIZE; 2442 return -EMSGSIZE;
@@ -2457,6 +2523,7 @@ static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
2457 return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire)) 2523 return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
2458 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr) 2524 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2459 + nla_total_size(xfrm_user_sec_ctx_size(xp->security)) 2525 + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
2526 + nla_total_size(sizeof(struct xfrm_mark))
2460 + userpolicy_type_attrsize(); 2527 + userpolicy_type_attrsize();
2461} 2528}
2462 2529
@@ -2479,10 +2546,13 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
2479 goto nlmsg_failure; 2546 goto nlmsg_failure;
2480 if (copy_to_user_policy_type(xp->type, skb) < 0) 2547 if (copy_to_user_policy_type(xp->type, skb) < 0)
2481 goto nlmsg_failure; 2548 goto nlmsg_failure;
2549 if (xfrm_mark_put(skb, &xp->mark))
2550 goto nla_put_failure;
2482 upe->hard = !!hard; 2551 upe->hard = !!hard;
2483 2552
2484 return nlmsg_end(skb, nlh); 2553 return nlmsg_end(skb, nlh);
2485 2554
2555nla_put_failure:
2486nlmsg_failure: 2556nlmsg_failure:
2487 nlmsg_cancel(skb, nlh); 2557 nlmsg_cancel(skb, nlh);
2488 return -EMSGSIZE; 2558 return -EMSGSIZE;
@@ -2519,6 +2589,7 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *
2519 headlen = sizeof(*id); 2589 headlen = sizeof(*id);
2520 } 2590 }
2521 len += userpolicy_type_attrsize(); 2591 len += userpolicy_type_attrsize();
2592 len += nla_total_size(sizeof(struct xfrm_mark));
2522 len += NLMSG_ALIGN(headlen); 2593 len += NLMSG_ALIGN(headlen);
2523 2594
2524 skb = nlmsg_new(len, GFP_ATOMIC); 2595 skb = nlmsg_new(len, GFP_ATOMIC);
@@ -2554,10 +2625,14 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *
2554 if (copy_to_user_policy_type(xp->type, skb) < 0) 2625 if (copy_to_user_policy_type(xp->type, skb) < 0)
2555 goto nlmsg_failure; 2626 goto nlmsg_failure;
2556 2627
2628 if (xfrm_mark_put(skb, &xp->mark))
2629 goto nla_put_failure;
2630
2557 nlmsg_end(skb, nlh); 2631 nlmsg_end(skb, nlh);
2558 2632
2559 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC); 2633 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2560 2634
2635nla_put_failure:
2561nlmsg_failure: 2636nlmsg_failure:
2562 kfree_skb(skb); 2637 kfree_skb(skb);
2563 return -1; 2638 return -1;